[lxc-devel] [lxd/master] [TESTING] clustering

stgraber on Github lxc-bot at linuxcontainers.org
Thu Jan 25 23:06:41 UTC 2018


From cb2163333f33ba43eaf674b204106a26fcba7940 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 14 Sep 2017 13:13:55 +0000
Subject: [PATCH 001/227] Add raft_nodes table

This new table is meant to hold addresses of LXD nodes that are
participating in the dqlite raft cluster. Each node in the cluster
will hold its own local copy of this table, regardless of whether it's
a raft node or not.

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/node/schema.go          |  7 ++++++-
 lxd/db/node/update.go          | 31 +++++++++++++++++++++++++++++++
 lxd/db/node/update_test.go     | 17 +++++++++++++++++
 test/suites/database_update.sh |  2 +-
 4 files changed, 55 insertions(+), 2 deletions(-)
 create mode 100644 lxd/db/node/update_test.go

diff --git a/lxd/db/node/schema.go b/lxd/db/node/schema.go
index cbf863e1c..a9754eeaa 100644
--- a/lxd/db/node/schema.go
+++ b/lxd/db/node/schema.go
@@ -155,6 +155,11 @@ CREATE TABLE profiles_devices_config (
     UNIQUE (profile_device_id, key),
     FOREIGN KEY (profile_device_id) REFERENCES profiles_devices (id) ON DELETE CASCADE
 );
+CREATE TABLE raft_nodes (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    address TEXT NOT NULL,
+    UNIQUE (address)
+);
 CREATE TABLE storage_pools (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
     name VARCHAR(255) NOT NULL,
@@ -188,5 +193,5 @@ CREATE TABLE storage_volumes_config (
     FOREIGN KEY (storage_volume_id) REFERENCES storage_volumes (id) ON DELETE CASCADE
 );
 
-INSERT INTO schema (version, updated_at) VALUES (36, strftime("%s"))
+INSERT INTO schema (version, updated_at) VALUES (37, strftime("%s"))
 `
diff --git a/lxd/db/node/update.go b/lxd/db/node/update.go
index 299a645e4..95a660202 100644
--- a/lxd/db/node/update.go
+++ b/lxd/db/node/update.go
@@ -84,9 +84,40 @@ var updates = map[int]schema.Update{
 	34: updateFromV33,
 	35: updateFromV34,
 	36: updateFromV35,
+	37: updateFromV36,
 }
 
 // Schema updates begin here
+
+// Add a raft_nodes table to be used when running in clustered mode. It lists
+// the current nodes in the LXD cluster that are participating in the dqlite
+// database Raft cluster.
+//
+// The 'id' column contains the raft server ID of the database node, and the
+// 'address' column its network address. Both are used internally by the raft
+// Go package to manage the cluster.
+//
+// Typical setups will have 3 LXD cluster nodes that participate in the dqlite
+// database Raft cluster, and an arbitrary number of additional LXD cluster
+// nodes that don't. Non-database nodes are not tracked in this table, but rather
+// in the nodes table of the cluster database itself.
+//
+// The data in this table must be replicated by LXD on all nodes of the
+// cluster, regardless of whether they are part of the raft cluster or not, and
+// all nodes will consult this table when they need to find the leader to
+// send SQL queries to.
+func updateFromV36(tx *sql.Tx) error {
+	stmts := `
+CREATE TABLE raft_nodes (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    address TEXT NOT NULL,
+    UNIQUE (address)
+);
+`
+	_, err := tx.Exec(stmts)
+	return err
+}
+
 func updateFromV35(tx *sql.Tx) error {
 	stmts := `
 CREATE TABLE tmp (
diff --git a/lxd/db/node/update_test.go b/lxd/db/node/update_test.go
new file mode 100644
index 000000000..980ef8bf3
--- /dev/null
+++ b/lxd/db/node/update_test.go
@@ -0,0 +1,17 @@
+package node_test
+
+import (
+	"testing"
+
+	"github.com/lxc/lxd/lxd/db/node"
+	"github.com/stretchr/testify/require"
+)
+
+func TestUpdateFromV36(t *testing.T) {
+	schema := node.Schema()
+	db, err := schema.ExerciseUpdate(37, nil)
+	require.NoError(t, err)
+
+	_, err = db.Exec("INSERT INTO raft_nodes VALUES (1, '1.2.3.4:666')")
+	require.NoError(t, err)
+}
diff --git a/test/suites/database_update.sh b/test/suites/database_update.sh
index 7b3737486..15189bd2f 100644
--- a/test/suites/database_update.sh
+++ b/test/suites/database_update.sh
@@ -9,7 +9,7 @@ test_database_update(){
   spawn_lxd "${LXD_MIGRATE_DIR}" true
 
   # Assert there are enough tables.
-  expected_tables=23
+  expected_tables=24
   tables=$(sqlite3 "${MIGRATE_DB}" ".dump" | grep -c "CREATE TABLE")
   [ "${tables}" -eq "${expected_tables}" ] || { echo "FAIL: Wrong number of tables after database migration. Found: ${tables}, expected ${expected_tables}"; false; }
 

From 95dd0048be72b2da315a8d8850f914454b23e79c Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 11 Oct 2017 15:05:22 +0000
Subject: [PATCH 002/227] Add query helpers to select and insert complex
 objects

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/query/objects.go      |  85 ++++++++++++++++++++
 lxd/db/query/objects_test.go | 187 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 272 insertions(+)
 create mode 100644 lxd/db/query/objects.go
 create mode 100644 lxd/db/query/objects_test.go

diff --git a/lxd/db/query/objects.go b/lxd/db/query/objects.go
new file mode 100644
index 000000000..f6dcdad09
--- /dev/null
+++ b/lxd/db/query/objects.go
@@ -0,0 +1,85 @@
+package query
+
+import (
+	"database/sql"
+	"fmt"
+	"strings"
+)
+
+// SelectObjects executes a statement which must yield rows with a specific
+// column schema. It invokes the given Dest hook for each yielded row.
+func SelectObjects(tx *sql.Tx, dest Dest, query string, args ...interface{}) error {
+	rows, err := tx.Query(query, args...)
+	if err != nil {
+		return err
+	}
+	defer rows.Close()
+
+	for i := 0; rows.Next(); i++ {
+		err := rows.Scan(dest(i)...)
+		if err != nil {
+			return err
+		}
+	}
+
+	err = rows.Err()
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// Dest is a function that is expected to return the objects to pass to the
+// 'dest' argument of sql.Rows.Scan(). It is invoked by SelectObjects once per
+// yielded row, and it will be passed the index of the row being scanned.
+type Dest func(i int) []interface{}
+
+// UpsertObject inserts or replaces a new row with the given column values
+// into the given table, using the given column order. For example:
+//
+// UpsertObject(tx, "cars", []string{"id", "brand"}, []interface{}{1, "ferrari"})
+//
+// The number of elements in 'columns' must match the number in 'values'.
+func UpsertObject(tx *sql.Tx, table string, columns []string, values []interface{}) (int64, error) {
+	n := len(columns)
+	if n == 0 {
+		return -1, fmt.Errorf("columns length is zero")
+	}
+	if n != len(values) {
+		return -1, fmt.Errorf("columns length does not match values length")
+	}
+
+	stmt := fmt.Sprintf(
+		"INSERT OR REPLACE INTO %s (%s) VALUES %s",
+		table, strings.Join(columns, ", "), exprParams(n))
+	result, err := tx.Exec(stmt, values...)
+	if err != nil {
+		return -1, err
+	}
+	id, err := result.LastInsertId()
+	if err != nil {
+		return -1, err
+	}
+	return id, nil
+}
+
+// DeleteObject removes the row identified by the given ID. The given table
+// must have a primary key column called 'id'.
+//
+// It returns a flag indicating whether a matching row was actually found
+// and deleted.
+func DeleteObject(tx *sql.Tx, table string, id int64) (bool, error) {
+	stmt := fmt.Sprintf("DELETE FROM %s WHERE id=?", table)
+	result, err := tx.Exec(stmt, id)
+	if err != nil {
+		return false, err
+	}
+	n, err := result.RowsAffected()
+	if err != nil {
+		return false, err
+	}
+	if n > 1 {
+		return true, fmt.Errorf("more than one row was deleted")
+	}
+	return n == 1, nil
+}
diff --git a/lxd/db/query/objects_test.go b/lxd/db/query/objects_test.go
new file mode 100644
index 000000000..d6bda9eb2
--- /dev/null
+++ b/lxd/db/query/objects_test.go
@@ -0,0 +1,187 @@
+package query_test
+
+import (
+	"database/sql"
+	"testing"
+
+	"github.com/lxc/lxd/lxd/db/query"
+	"github.com/mpvl/subtest"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// Exercise possible failure modes.
+func TestSelectObjects_Error(t *testing.T) {
+	cases := []struct {
+		dest  query.Dest
+		query string
+		error string
+	}{
+		{
+			func(int) []interface{} { return nil },
+			"garbage",
+			"near \"garbage\": syntax error",
+		},
+		{
+			func(int) []interface{} { return make([]interface{}, 1) },
+			"SELECT id, name FROM test",
+			"sql: expected 2 destination arguments in Scan, not 1",
+		},
+	}
+	for _, c := range cases {
+		subtest.Run(t, c.query, func(t *testing.T) {
+			tx := newTxForObjects(t)
+			err := query.SelectObjects(tx, c.dest, c.query)
+			assert.EqualError(t, err, c.error)
+		})
+	}
+}
+
+// Scan rows yielded by the query.
+func TestSelectObjects(t *testing.T) {
+	tx := newTxForObjects(t)
+	objects := make([]struct {
+		ID   int
+		Name string
+	}, 1)
+	object := objects[0]
+
+	dest := func(i int) []interface{} {
+		require.Equal(t, 0, i, "expected at most one row to be yielded")
+		return []interface{}{&object.ID, &object.Name}
+	}
+
+	stmt := "SELECT id, name FROM test WHERE name=?"
+	err := query.SelectObjects(tx, dest, stmt, "bar")
+	require.NoError(t, err)
+
+	assert.Equal(t, 1, object.ID)
+	assert.Equal(t, "bar", object.Name)
+}
+
+// Exercise possible failure modes.
+func TestUpsertObject_Error(t *testing.T) {
+	cases := []struct {
+		columns []string
+		values  []interface{}
+		error   string
+	}{
+		{
+			[]string{},
+			[]interface{}{},
+			"columns length is zero",
+		},
+		{
+			[]string{"id"},
+			[]interface{}{2, "egg"},
+			"columns length does not match values length",
+		},
+	}
+	for _, c := range cases {
+		subtest.Run(t, c.error, func(t *testing.T) {
+			tx := newTxForObjects(t)
+			id, err := query.UpsertObject(tx, "foo", c.columns, c.values)
+			assert.Equal(t, int64(-1), id)
+			assert.EqualError(t, err, c.error)
+		})
+	}
+}
+
+// Insert a new row.
+func TestUpsertObject_Insert(t *testing.T) {
+	tx := newTxForObjects(t)
+
+	id, err := query.UpsertObject(tx, "test", []string{"name"}, []interface{}{"egg"})
+	require.NoError(t, err)
+	assert.Equal(t, int64(2), id)
+
+	objects := make([]struct {
+		ID   int
+		Name string
+	}, 1)
+	object := objects[0]
+
+	dest := func(i int) []interface{} {
+		require.Equal(t, 0, i, "expected at most one row to be yielded")
+		return []interface{}{&object.ID, &object.Name}
+	}
+
+	stmt := "SELECT id, name FROM test WHERE name=?"
+	err = query.SelectObjects(tx, dest, stmt, "egg")
+	require.NoError(t, err)
+
+	assert.Equal(t, 2, object.ID)
+	assert.Equal(t, "egg", object.Name)
+}
+
+// Update an existing row.
+func TestUpsertObject_Update(t *testing.T) {
+	tx := newTxForObjects(t)
+
+	id, err := query.UpsertObject(tx, "test", []string{"id", "name"}, []interface{}{1, "egg"})
+	require.NoError(t, err)
+	assert.Equal(t, int64(1), id)
+
+	objects := make([]struct {
+		ID   int
+		Name string
+	}, 1)
+	object := objects[0]
+
+	dest := func(i int) []interface{} {
+		require.Equal(t, 0, i, "expected at most one row to be yielded")
+		return []interface{}{&object.ID, &object.Name}
+	}
+
+	stmt := "SELECT id, name FROM test WHERE name=?"
+	err = query.SelectObjects(tx, dest, stmt, "egg")
+	require.NoError(t, err)
+
+	assert.Equal(t, 1, object.ID)
+	assert.Equal(t, "egg", object.Name)
+}
+
+// Exercise possible failure modes.
+func TestDeleteObject_Error(t *testing.T) {
+	tx := newTxForObjects(t)
+
+	deleted, err := query.DeleteObject(tx, "foo", 1)
+	assert.False(t, deleted)
+	assert.EqualError(t, err, "no such table: foo")
+}
+
+// If a row was actually deleted, the returned flag is true.
+func TestDeleteObject_Deleted(t *testing.T) {
+	tx := newTxForObjects(t)
+
+	deleted, err := query.DeleteObject(tx, "test", 1)
+	assert.True(t, deleted)
+	assert.NoError(t, err)
+}
+
+// If no row was actually deleted, the returned flag is false.
+func TestDeleteObject_NotDeleted(t *testing.T) {
+	tx := newTxForObjects(t)
+
+	deleted, err := query.DeleteObject(tx, "test", 1000)
+	assert.False(t, deleted)
+	assert.NoError(t, err)
+}
+
+// Return a new transaction against an in-memory SQLite database with a single
+// test table populated with a few rows for testing object-related queries.
+func newTxForObjects(t *testing.T) *sql.Tx {
+	db, err := sql.Open("sqlite3", ":memory:")
+	assert.NoError(t, err)
+
+	_, err = db.Exec("CREATE TABLE test (id INTEGER PRIMARY KEY, name TEXT)")
+	assert.NoError(t, err)
+
+	_, err = db.Exec("INSERT INTO test VALUES (0, 'foo'), (1, 'bar')")
+	assert.NoError(t, err)
+
+	tx, err := db.Begin()
+	assert.NoError(t, err)
+
+	return tx
+}
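
Taken together, a hedged usage sketch of the three helpers (the "cars"
table is the hypothetical one from the UpsertObject docstring; error
handling elided):

    type car struct {
        ID    int64
        Brand string
    }

    cars := []car{}
    dest := func(i int) []interface{} {
        cars = append(cars, car{}) // grow the result slice as rows arrive
        return []interface{}{&cars[i].ID, &cars[i].Brand}
    }
    err := query.SelectObjects(tx, dest, "SELECT id, brand FROM cars")

    id, err := query.UpsertObject(tx, "cars", []string{"brand"},
        []interface{}{"ferrari"})

    deleted, err := query.DeleteObject(tx, "cars", id)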

From 650b41527ae5a37ec660382cdb0677c36d92c889 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 15 Sep 2017 07:23:30 +0000
Subject: [PATCH 003/227] Add InsertStrings helper to insert rows with a single
 string value

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/query/slices.go      | 25 +++++++++++++++++++++++++
 lxd/db/query/slices_test.go | 13 +++++++++++++
 2 files changed, 38 insertions(+)

diff --git a/lxd/db/query/slices.go b/lxd/db/query/slices.go
index 4e58126c7..59d0cc892 100644
--- a/lxd/db/query/slices.go
+++ b/lxd/db/query/slices.go
@@ -2,6 +2,8 @@ package query
 
 import (
 	"database/sql"
+	"fmt"
+	"strings"
 )
 
 // SelectStrings executes a statement which must yield rows with a single string
@@ -48,6 +50,29 @@ func SelectIntegers(tx *sql.Tx, query string) ([]int, error) {
 	return values, nil
 }
 
+// InsertStrings inserts a new row for each of the given strings, using the
+// given insert statement template, which must define exactly one insertion
+// column and one substitution placeholder for the values. For example:
+// InsertStrings(tx, "INSERT INTO foo(name) VALUES %s", []string{"bar"}).
+func InsertStrings(tx *sql.Tx, stmt string, values []string) error {
+	n := len(values)
+
+	if n == 0 {
+		return nil
+	}
+
+	params := make([]string, n)
+	args := make([]interface{}, n)
+	for i, value := range values {
+		params[i] = "(?)"
+		args[i] = value
+	}
+
+	stmt = fmt.Sprintf(stmt, strings.Join(params, ", "))
+	_, err := tx.Exec(stmt, args...)
+	return err
+}
+
 // Execute the given query and ensure that it yields rows with a single column
 // of the given database type. For every row yielded, execute the given
 // scanner.
diff --git a/lxd/db/query/slices_test.go b/lxd/db/query/slices_test.go
index f5bb6549a..36e31a5b9 100644
--- a/lxd/db/query/slices_test.go
+++ b/lxd/db/query/slices_test.go
@@ -6,6 +6,7 @@ import (
 
 	_ "github.com/mattn/go-sqlite3"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 
 	"github.com/lxc/lxd/lxd/db/query"
 	"github.com/lxc/lxd/shared/subtest"
@@ -51,6 +52,18 @@ func TestIntegers(t *testing.T) {
 	assert.Equal(t, []int{0, 1}, values)
 }
 
+// Insert new rows in bulk.
+func TestInsertStrings(t *testing.T) {
+	tx := newTxForSlices(t)
+
+	err := query.InsertStrings(tx, "INSERT INTO test(name) VALUES %s", []string{"xx", "yy"})
+	require.NoError(t, err)
+
+	values, err := query.SelectStrings(tx, "SELECT name FROM test ORDER BY name DESC LIMIT 2")
+	require.NoError(t, err)
+	assert.Equal(t, values, []string{"yy", "xx"})
+}
+
 // Return a new transaction against an in-memory SQLite database with a single
 // test table populated with a few rows.
 func newTxForSlices(t *testing.T) *sql.Tx {
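
To make the expansion concrete: InsertStrings joins one "(?)" group per
value and substitutes the result for the single %s placeholder, so the
call in the test above is equivalent to:

    err := query.InsertStrings(tx, "INSERT INTO test(name) VALUES %s",
        []string{"xx", "yy"})
    // Statement actually executed:
    //   INSERT INTO test(name) VALUES (?), (?)
    // with "xx" and "yy" bound as the two arguments.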

From b586c1b5313fd0b3d7bf6275213795084f17b1fc Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Sun, 1 Oct 2017 17:13:52 +0000
Subject: [PATCH 004/227] Add util.InMemoryNetwork to create in-memory
 listener/dialer pairs.

This is a convenience for creating in-memory networks that implement
the net.Conn interface. It will be used when running a node in
non-clustered mode, where there will be no actual TCP/gRPC connection
to an external dqlite node, but rather just an in-memory connection to
the local dqlite instance (which will be the leader).

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/util/net.go      | 57 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 lxd/util/net_test.go | 18 +++++++++++++++++
 2 files changed, 75 insertions(+)

diff --git a/lxd/util/net.go b/lxd/util/net.go
index 1f96f27f4..0e368b0f4 100644
--- a/lxd/util/net.go
+++ b/lxd/util/net.go
@@ -7,6 +7,63 @@ import (
 	"github.com/lxc/lxd/shared"
 )
 
+// InMemoryNetwork creates a fully in-memory listener and dial function.
+//
+// Each time the dial function is invoked, a new pair of net.Conn objects will
+// be created using net.Pipe: the listener's Accept method will unblock and
+// return one end of the pipe and the other end will be returned by the dial
+// function.
+func InMemoryNetwork() (net.Listener, func() net.Conn) {
+	listener := &inMemoryListener{
+		conns:  make(chan net.Conn, 16),
+		closed: make(chan struct{}),
+	}
+	dialer := func() net.Conn {
+		server, client := net.Pipe()
+		listener.conns <- server
+		return client
+	}
+	return listener, dialer
+}
+
+type inMemoryListener struct {
+	conns  chan net.Conn
+	closed chan struct{}
+}
+
+// Accept waits for and returns the next connection to the listener.
+func (l *inMemoryListener) Accept() (net.Conn, error) {
+	select {
+	case conn := <-l.conns:
+		return conn, nil
+	case <-l.closed:
+		return nil, fmt.Errorf("closed")
+	}
+}
+
+// Close closes the listener.
+// Any blocked Accept operations will be unblocked and return errors.
+func (l *inMemoryListener) Close() error {
+	close(l.closed)
+	return nil
+}
+
+// Addr returns the listener's network address.
+func (l *inMemoryListener) Addr() net.Addr {
+	return &inMemoryAddr{}
+}
+
+type inMemoryAddr struct {
+}
+
+func (a *inMemoryAddr) Network() string {
+	return "memory"
+}
+
+func (a *inMemoryAddr) String() string {
+	return ""
+}
+
 // CanonicalNetworkAddress parses the given network address and returns a
 // string of the form "host:port", possibly filling it with the default port if
 // it's missing.
diff --git a/lxd/util/net_test.go b/lxd/util/net_test.go
index 0b29eb576..a56581464 100644
--- a/lxd/util/net_test.go
+++ b/lxd/util/net_test.go
@@ -6,8 +6,26 @@ import (
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/mpvl/subtest"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
+// The connection returned by the dialer is paired with the one returned by the
+// Accept() method of the listener.
+func TestInMemoryNetwork(t *testing.T) {
+	listener, dialer := util.InMemoryNetwork()
+	client := dialer()
+	server, err := listener.Accept()
+	require.NoError(t, err)
+
+	go client.Write([]byte("hello"))
+	buffer := make([]byte, 5)
+	n, err := server.Read(buffer)
+	require.NoError(t, err)
+
+	assert.Equal(t, 5, n)
+	assert.Equal(t, []byte("hello"), buffer)
+}
+
 func TestCanonicalNetworkAddress(t *testing.T) {
 	cases := map[string]string{
 		"127.0.0.1":                             "127.0.0.1:8443",

From 9e411407d7938e0994bc12921f81f94b4f17c970 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 11 Sep 2017 12:19:44 +0000
Subject: [PATCH 005/227] Add db.Cluster with basic initialization

A new Cluster structure has been added to the lxd/db sub-package. It
is meant to mediate access to the dqlite-based cluster database. It
uses the go-grpc-sql package to serialize SQL queries over a gRPC
connection against the target dqlite leader node.

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/cluster/open.go   | 48 ++++++++++++++++++++++++++++++++++
 lxd/db/db.go             | 38 ++++++++++++++++++++++++++-
 lxd/db/db_export_test.go |  9 +++++++
 lxd/db/db_test.go        | 16 ++++++++++++
 lxd/db/testing.go        | 67 ++++++++++++++++++++++++++++++++++++++++++++++++
 lxd/db/transaction.go    | 18 +++++++++++++
 6 files changed, 195 insertions(+), 1 deletion(-)
 create mode 100644 lxd/db/cluster/open.go
 create mode 100644 lxd/db/db_export_test.go

diff --git a/lxd/db/cluster/open.go b/lxd/db/cluster/open.go
new file mode 100644
index 000000000..d135dea6f
--- /dev/null
+++ b/lxd/db/cluster/open.go
@@ -0,0 +1,48 @@
+package cluster
+
+import (
+	"database/sql"
+	"fmt"
+	"sync/atomic"
+
+	"github.com/CanonicalLtd/go-grpc-sql"
+)
+
+// Open the cluster database object.
+//
+// The name argument is the name of the cluster database. It defaults to
+// 'db.bin', but can be overridden for testing.
+//
+// The dialer argument is a function that returns a gRPC dialer that can be
+// used to connect to a database node using the gRPC SQL package.
+func Open(name string, dialer grpcsql.Dialer) (*sql.DB, error) {
+	driver := grpcsql.NewDriver(dialer)
+	driverName := grpcSQLDriverName()
+	sql.Register(driverName, driver)
+
+	// Create the cluster db. This won't immediately establish any gRPC
+	// connection, that will happen only when a db transaction is started
+	// (see the database/sql connection pooling code for more details).
+	if name == "" {
+		name = "db.bin"
+	}
+	db, err := sql.Open(driverName, name)
+	if err != nil {
+		return nil, fmt.Errorf("cannot open cluster database: %v", err)
+	}
+
+	return db, nil
+}
+
+// Generate a new name for the grpcsql driver registration. We need it to be
+// unique for testing, see below.
+func grpcSQLDriverName() string {
+	defer atomic.AddUint64(&grpcSQLDriverSerial, 1)
+	return fmt.Sprintf("grpc-%d", grpcSQLDriverSerial)
+}
+
+// Monotonic serial number for registering new instances of grpcsql.Driver
+// using the database/sql stdlib package. This is needed since there's no way
+// to unregister drivers, and in unit tests more than one driver gets
+// registered.
+var grpcSQLDriverSerial uint64
diff --git a/lxd/db/db.go b/lxd/db/db.go
index c43bba0f3..f1eae9653 100644
--- a/lxd/db/db.go
+++ b/lxd/db/db.go
@@ -5,8 +5,10 @@ import (
 	"fmt"
 	"time"
 
+	grpcsql "github.com/CanonicalLtd/go-grpc-sql"
 	"github.com/mattn/go-sqlite3"
 
+	"github.com/lxc/lxd/lxd/db/cluster"
 	"github.com/lxc/lxd/lxd/db/node"
 	"github.com/lxc/lxd/lxd/db/query"
 	"github.com/lxc/lxd/shared/logger"
@@ -30,7 +32,6 @@ var (
 // Node mediates access to LXD's data stored in the node-local SQLite database.
 type Node struct {
 	db *sql.DB // Handle to the node-local SQLite database file.
-
 }
 
 // OpenNode creates a new Node object.
@@ -111,6 +112,41 @@ func (n *Node) Begin() (*sql.Tx, error) {
 	return begin(n.db)
 }
 
+// Cluster mediates access to LXD's data stored in the cluster dqlite database.
+type Cluster struct {
+	db *sql.DB // Handle to the cluster dqlite database, gated behind gRPC SQL.
+}
+
+// OpenCluster creates a new Cluster object for interacting with the dqlite
+// database.
+func OpenCluster(name string, dialer grpcsql.Dialer) (*Cluster, error) {
+	db, err := cluster.Open(name, dialer)
+	if err != nil {
+		return nil, err
+	}
+	cluster := &Cluster{
+		db: db,
+	}
+	return cluster, nil
+}
+
+// Transaction creates a new ClusterTx object and transactionally executes the
+// cluster database interactions invoked by the given function. If the function
+// returns no error, all database changes are committed to the cluster
+// database, otherwise they are rolled back.
+func (c *Cluster) Transaction(f func(*ClusterTx) error) error {
+	clusterTx := &ClusterTx{}
+	return query.Transaction(c.db, func(tx *sql.Tx) error {
+		clusterTx.tx = tx
+		return f(clusterTx)
+	})
+}
+
+// Close the database facade.
+func (c *Cluster) Close() error {
+	return c.db.Close()
+}
+
 // UpdateSchemasDotGo updates the schema.go files in the local/ and cluster/
 // sub-packages.
 func UpdateSchemasDotGo() error {
diff --git a/lxd/db/db_export_test.go b/lxd/db/db_export_test.go
new file mode 100644
index 000000000..a975c9081
--- /dev/null
+++ b/lxd/db/db_export_test.go
@@ -0,0 +1,9 @@
+package db
+
+import "database/sql"
+
+// DB returns the low level database handle to the cluster gRPC SQL database
+// handler. Used by tests for introspecting the database with raw SQL.
+func (c *Cluster) DB() *sql.DB {
+	return c.db
+}
diff --git a/lxd/db/db_test.go b/lxd/db/db_test.go
index 243d48de0..cf2eeb6df 100644
--- a/lxd/db/db_test.go
+++ b/lxd/db/db_test.go
@@ -23,3 +23,19 @@ func TestNode_Schema(t *testing.T) {
 	assert.NoError(t, rows.Scan(&n))
 	assert.Equal(t, 1, n)
 }
+
+// A gRPC SQL connection is established when starting to interact with the
+// cluster database.
+func TestCluster_Setup(t *testing.T) {
+	cluster, cleanup := db.NewTestCluster(t)
+	defer cleanup()
+
+	db := cluster.DB()
+	rows, err := db.Query("SELECT COUNT(*) FROM sqlite_master")
+	assert.NoError(t, err)
+	defer rows.Close()
+	assert.Equal(t, true, rows.Next())
+	var n uint
+	assert.NoError(t, rows.Scan(&n))
+	assert.Zero(t, n)
+}
diff --git a/lxd/db/testing.go b/lxd/db/testing.go
index 188e6f630..1cb6344d3 100644
--- a/lxd/db/testing.go
+++ b/lxd/db/testing.go
@@ -2,10 +2,16 @@ package db
 
 import (
 	"io/ioutil"
+	"net"
 	"os"
 	"testing"
+	"time"
 
+	"github.com/CanonicalLtd/go-grpc-sql"
+	"github.com/lxc/lxd/lxd/util"
+	"github.com/mattn/go-sqlite3"
 	"github.com/stretchr/testify/require"
+	"google.golang.org/grpc"
 )
 
 // NewTestNode creates a new Node for testing purposes, along with a function
@@ -43,3 +49,64 @@ func NewTestNodeTx(t *testing.T) (*NodeTx, func()) {
 
 	return nodeTx, cleanup
 }
+
+// NewTestCluster creates a new Cluster for testing purposes, along with a function
+// that can be used to clean it up when done.
+func NewTestCluster(t *testing.T) (*Cluster, func()) {
+	// Create an in-memory gRPC SQL server and dialer.
+	server, dialer := newGrpcServer()
+
+	cluster, err := OpenCluster(":memory:", dialer)
+	require.NoError(t, err)
+
+	cleanup := func() {
+		require.NoError(t, cluster.Close())
+		server.Stop()
+	}
+
+	return cluster, cleanup
+}
+
+// NewTestClusterTx returns a fresh ClusterTx object, along with a function that can
+// be called to cleanup state when done with it.
+func NewTestClusterTx(t *testing.T) (*ClusterTx, func()) {
+	cluster, clusterCleanup := NewTestCluster(t)
+
+	var err error
+
+	clusterTx := &ClusterTx{}
+	clusterTx.tx, err = cluster.db.Begin()
+	require.NoError(t, err)
+
+	cleanup := func() {
+		err := clusterTx.tx.Commit()
+		require.NoError(t, err)
+		clusterCleanup()
+	}
+
+	return clusterTx, cleanup
+}
+
+// Create a new in-memory gRPC server attached to a grpc-sql gateway backed by a
+// SQLite driver.
+//
+// Return the newly created gRPC server and a dialer that can be used to
+// connect to it.
+func newGrpcServer() (*grpc.Server, grpcsql.Dialer) {
+	listener, dial := util.InMemoryNetwork()
+	server := grpcsql.NewServer(&sqlite3.SQLiteDriver{})
+
+	// Setup an in-memory gRPC dialer.
+	options := []grpc.DialOption{
+		grpc.WithInsecure(),
+		grpc.WithDialer(func(string, time.Duration) (net.Conn, error) {
+			return dial(), nil
+		}),
+	}
+	dialer := func() (*grpc.ClientConn, error) {
+		return grpc.Dial("", options...)
+	}
+
+	go server.Serve(listener)
+	return server, dialer
+}
diff --git a/lxd/db/transaction.go b/lxd/db/transaction.go
index 4e1d89c66..de30c11f7 100644
--- a/lxd/db/transaction.go
+++ b/lxd/db/transaction.go
@@ -9,3 +9,21 @@ import "database/sql"
 type NodeTx struct {
 	tx *sql.Tx // Handle to a transaction in the node-level SQLite database.
 }
+
+// Tx returns the low level database handle to the node-local SQLite
+// transaction.
+//
+// FIXME: this is a transitional method needed for compatibility with some
+//        legacy call sites. It should be removed when there are no more
+//        consumers.
+func (n *NodeTx) Tx() *sql.Tx {
+	return n.tx
+}
+
+// ClusterTx models a single interaction with a LXD cluster database.
+//
+// It wraps low-level sql.Tx objects and offers a high-level API to fetch and
+// update data.
+type ClusterTx struct {
+	tx *sql.Tx // Handle to a transaction in the cluster dqlite database.
+}
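
Putting the pieces together, a minimal sketch of how a consumer is
expected to use the new facade (the dialer comes from newGrpcServer or,
later, from the cluster gateway; error handling elided):

    cluster, err := db.OpenCluster("db.bin", dialer)
    defer cluster.Close()

    err = cluster.Transaction(func(tx *db.ClusterTx) error {
        // Statements issued here are serialized over gRPC to the dqlite
        // leader and committed (or rolled back) as a single transaction.
        return nil
    })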

From 548acbb894ebf4be57925fcf84e1a7adc5de3d2a Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Sat, 14 Oct 2017 11:38:10 +0000
Subject: [PATCH 006/227] Add cluster.Gateway to manage the lifecycle of the
 cluster database

This is a first version of the Gateway object, an API that the daemon
will use in order to 1) run a dqlite node (if appropriate) 2) connect
to the leader dqlite node via gRPC.

For now there's no actual dqlite plumbing in place, and all the
Gateway does is expose a regular SQLite db over an in-memory gRPC
network (client/server).

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/gateway.go      | 103 ++++++++++++++++++++++++++++++++++++++++++++
 lxd/cluster/gateway_test.go |  40 +++++++++++++++++
 lxd/db/db.go                |  11 ++++-
 3 files changed, 152 insertions(+), 2 deletions(-)
 create mode 100644 lxd/cluster/gateway.go
 create mode 100644 lxd/cluster/gateway_test.go

diff --git a/lxd/cluster/gateway.go b/lxd/cluster/gateway.go
new file mode 100644
index 000000000..41aee225b
--- /dev/null
+++ b/lxd/cluster/gateway.go
@@ -0,0 +1,103 @@
+package cluster
+
+import (
+	"net"
+	"time"
+
+	"github.com/CanonicalLtd/go-grpc-sql"
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/util"
+	"github.com/lxc/lxd/shared"
+	"github.com/mattn/go-sqlite3"
+	"google.golang.org/grpc"
+)
+
+// NewGateway creates a new Gateway for managing access to the dqlite cluster.
+//
+// When a new gateway is created, the node-level database is queried to check
+// what kind of role this node plays and if it's exposed over the network. It
+// will initialize internal data structures accordingly, for example starting a
+// dqlite driver if this node is a database node.
+//
+// After creation, the Daemon is expected to expose whatever http handlers the
+// HandlerFuncs method returns and to access the dqlite cluster using the gRPC
+// dialer returned by the Dialer method.
+func NewGateway(db *db.Node, cert *shared.CertInfo, latency float64) (*Gateway, error) {
+	gateway := &Gateway{
+		db:      db,
+		cert:    cert,
+		latency: latency,
+	}
+
+	err := gateway.init()
+	if err != nil {
+		return nil, err
+	}
+
+	return gateway, nil
+}
+
+// Gateway mediates access to the dqlite cluster using a gRPC SQL client, and
+// possibly runs a dqlite replica on this LXD node (if we're configured to do
+// so).
+type Gateway struct {
+	db      *db.Node
+	cert    *shared.CertInfo
+	latency float64
+
+	// The gRPC server exposing the dqlite driver created by this
+	// gateway. It's nil if this LXD node is not supposed to be part of the
+	// raft cluster.
+	server *grpc.Server
+
+	// A dialer that will connect to the gRPC server using an in-memory
+	// net.Conn. It's non-nil when clustering is not enabled on this LXD
+	// node, and so we don't expose any dqlite or raft network endpoint,
+	// but we still want to use dqlite as the backend for the "cluster"
+	// database, to minimize the difference between code paths in
+	// clustering and non-clustering modes.
+	memoryDial func() (*grpc.ClientConn, error)
+}
+
+// Dialer returns a gRPC dial function that can be used to connect to one of
+// the dqlite nodes via gRPC.
+func (g *Gateway) Dialer() grpcsql.Dialer {
+	return func() (*grpc.ClientConn, error) {
+		// Memory connection.
+		return g.memoryDial()
+	}
+}
+
+// Shutdown this gateway, stopping the gRPC server and possibly the raft factory.
+func (g *Gateway) Shutdown() error {
+	if g.server != nil {
+		g.server.Stop()
+		// Unset the memory dial, since Shutdown() is also called for
+		// switching between in-memory and network mode.
+		g.memoryDial = nil
+	}
+	return nil
+}
+
+// Initialize the gateway, creating a new raft factory and gRPC server (if this
+// node is a database node), and a gRPC dialer.
+func (g *Gateway) init() error {
+	g.server = grpcsql.NewServer(&sqlite3.SQLiteDriver{})
+	listener, dial := util.InMemoryNetwork()
+	go g.server.Serve(listener)
+	g.memoryDial = grpcMemoryDial(dial)
+	return nil
+}
+
+// Convert a raw in-memory dial function into a gRPC one.
+func grpcMemoryDial(dial func() net.Conn) func() (*grpc.ClientConn, error) {
+	options := []grpc.DialOption{
+		grpc.WithInsecure(),
+		grpc.WithDialer(func(string, time.Duration) (net.Conn, error) {
+			return dial(), nil
+		}),
+	}
+	return func() (*grpc.ClientConn, error) {
+		return grpc.Dial("", options...)
+	}
+}
diff --git a/lxd/cluster/gateway_test.go b/lxd/cluster/gateway_test.go
new file mode 100644
index 000000000..33072e993
--- /dev/null
+++ b/lxd/cluster/gateway_test.go
@@ -0,0 +1,40 @@
+package cluster_test
+
+import (
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/lxc/lxd/lxd/cluster"
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/logging"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// Basic creation and shutdown. By default, the gateway runs an in-memory gRPC
+// server.
+func TestGateway_Single(t *testing.T) {
+	db, cleanup := db.NewTestNode(t)
+	defer cleanup()
+
+	cert := shared.TestingKeyPair()
+	gateway := newGateway(t, db, cert)
+	defer gateway.Shutdown()
+
+	dialer := gateway.Dialer()
+	conn, err := dialer()
+	assert.NoError(t, err)
+	assert.NotNil(t, conn)
+}
+
+// Create a new test Gateway with the given parameters, and ensure no error
+// happens.
+func newGateway(t *testing.T, db *db.Node, certInfo *shared.CertInfo) *cluster.Gateway {
+	logging.Testing(t)
+	require.NoError(t, os.Mkdir(filepath.Join(db.Dir(), "raft"), 0755))
+	gateway, err := cluster.NewGateway(db, certInfo, 0.2)
+	require.NoError(t, err)
+	return gateway
+}
diff --git a/lxd/db/db.go b/lxd/db/db.go
index f1eae9653..9c6add273 100644
--- a/lxd/db/db.go
+++ b/lxd/db/db.go
@@ -31,7 +31,8 @@ var (
 
 // Node mediates access to LXD's data stored in the node-local SQLite database.
 type Node struct {
-	db *sql.DB // Handle to the node-local SQLite database file.
+	db  *sql.DB // Handle to the node-local SQLite database file.
+	dir string  // Reference to the directory where the database file lives.
 }
 
 // OpenNode creates a new Node object.
@@ -55,7 +56,8 @@ func OpenNode(dir string, fresh func(*Node) error, legacyPatches map[int]*Legacy
 	}
 
 	node := &Node{
-		db: db,
+		db:  db,
+		dir: dir,
 	}
 
 	if initial == 0 {
@@ -90,6 +92,11 @@ func (n *Node) DB() *sql.DB {
 	return n.db
 }
 
+// Dir returns the directory of the underlying SQLite database file.
+func (n *Node) Dir() string {
+	return n.dir
+}
+
 // Transaction creates a new NodeTx object and transactionally executes the
 // node-level database interactions invoked by the given function. If the
 // function returns no error, all database changes are committed to the
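
The intended lifecycle, sketched after TestGateway_Single above (the 1.0
latency value is just a plausible default, not mandated by the patch):

    gateway, err := cluster.NewGateway(nodeDB, certInfo, 1.0)
    defer gateway.Shutdown()

    // In non-clustered mode this returns a gRPC client connection over
    // the in-memory pipe to the local SQLite-backed grpcsql server.
    conn, err := gateway.Dialer()()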

From 9925dfd7980d613652fa80532c403e86fcd124bc Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Sat, 14 Oct 2017 12:01:54 +0000
Subject: [PATCH 007/227] Wire cluster.Gateway into Daemon

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/daemon.go                  | 71 +++++++++++++++++++++++++++++++-----------
 lxd/daemon_integration_test.go |  4 ++-
 lxd/main_daemon.go             |  5 ++-
 3 files changed, 58 insertions(+), 22 deletions(-)

diff --git a/lxd/daemon.go b/lxd/daemon.go
index 5262cdce3..d1cbdb9ec 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -25,6 +25,7 @@ import (
 	"gopkg.in/macaroon-bakery.v2/bakery/identchecker"
 	"gopkg.in/macaroon-bakery.v2/httpbakery"
 
+	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/endpoints"
 	"github.com/lxc/lxd/lxd/maas"
@@ -45,6 +46,7 @@ type Daemon struct {
 	os           *sys.OS
 	db           *db.Node
 	maas         *maas.Controller
+	cluster      *db.Cluster
 	readyChan    chan bool
 	shutdownChan chan bool
 
@@ -58,6 +60,7 @@ type Daemon struct {
 
 	config    *DaemonConfig
 	endpoints *endpoints.Endpoints
+	gateway   *cluster.Gateway
 
 	proxy func(req *http.Request) (*url.URL, error)
 
@@ -71,7 +74,8 @@ type externalAuth struct {
 
 // DaemonConfig holds configuration values for Daemon.
 type DaemonConfig struct {
-	Group string // Group name the local unix socket should be chown'ed to
+	Group       string  // Group name the local unix socket should be chown'ed to
+	RaftLatency float64 // Coarse-grained measure of the cluster latency
 }
 
 // NewDaemon returns a new Daemon object with the given configuration.
@@ -82,9 +86,16 @@ func NewDaemon(config *DaemonConfig, os *sys.OS) *Daemon {
 	}
 }
 
+// DefaultDaemonConfig returns a DaemonConfig object with default values.
+func DefaultDaemonConfig() *DaemonConfig {
+	return &DaemonConfig{
+		RaftLatency: 1.0,
+	}
+}
+
 // DefaultDaemon returns a new, un-initialized Daemon object with default values.
 func DefaultDaemon() *Daemon {
-	config := &DaemonConfig{}
+	config := DefaultDaemonConfig()
 	os := sys.DefaultOS()
 	return NewDaemon(config, os)
 }
@@ -364,6 +375,37 @@ func (d *Daemon) init() error {
 		return err
 	}
 
+	/* Setup server certificate */
+	certInfo, err := shared.KeyPairAndCA(d.os.VarDir, "server", shared.CertServer)
+	if err != nil {
+		return err
+	}
+
+	/* Setup dqlite */
+	d.gateway, err = cluster.NewGateway(d.db, certInfo, d.config.RaftLatency)
+	if err != nil {
+		return err
+	}
+
+	/* Setup some mounts (nice to have) */
+	if !d.os.MockMode {
+		// Attempt to mount the shmounts tmpfs
+		setupSharedMounts()
+
+		// Attempt to Mount the devlxd tmpfs
+		devlxd := filepath.Join(d.os.VarDir, "devlxd")
+		if !shared.IsMountPoint(devlxd) {
+			syscall.Mount("tmpfs", devlxd, "tmpfs", 0, "size=100k,mode=0755")
+		}
+	}
+
+	/* Open the cluster database */
+	clusterFilename := filepath.Join(d.os.VarDir, "db.bin")
+	d.cluster, err = db.OpenCluster(clusterFilename, d.gateway.Dialer())
+	if err != nil {
+		return err
+	}
+
 	/* Read the storage pools */
 	err = SetupStorageDriver(d.State(), false)
 	if err != nil {
@@ -398,17 +440,6 @@ func (d *Daemon) init() error {
 		daemonConfig["core.proxy_ignore_hosts"].Get(),
 	)
 
-	/* Setup some mounts (nice to have) */
-	if !d.os.MockMode {
-		// Attempt to mount the shmounts tmpfs
-		setupSharedMounts()
-
-		// Attempt to Mount the devlxd tmpfs
-		if !shared.IsMountPoint(shared.VarPath("devlxd")) {
-			syscall.Mount("tmpfs", shared.VarPath("devlxd"), "tmpfs", 0, "size=100k,mode=0755")
-		}
-	}
-
 	if !d.os.MockMode {
 		/* Start the scheduler */
 		go deviceEventListener(d.State())
@@ -429,11 +460,6 @@ func (d *Daemon) init() error {
 	}
 
 	/* Setup the web server */
-	certInfo, err := shared.KeyPairAndCA(d.os.VarDir, "server", shared.CertServer)
-	if err != nil {
-		return err
-	}
-
 	config := &endpoints.Config{
 		Dir:                  d.os.VarDir,
 		Cert:                 certInfo,
@@ -541,6 +567,15 @@ func (d *Daemon) Stop() error {
 		logger.Infof("Closing the database")
 		trackError(d.db.Close())
 	}
+	if d.cluster != nil {
+		trackError(d.cluster.Close())
+	}
+	if d.gateway != nil {
+		trackError(d.gateway.Shutdown())
+	}
+	if d.endpoints != nil {
+		trackError(d.endpoints.Down())
+	}
 
 	logger.Infof("Saving simplestreams cache")
 	trackError(imageSaveStreamCache(d.os))
diff --git a/lxd/daemon_integration_test.go b/lxd/daemon_integration_test.go
index 79e8700b3..0f689dfa5 100644
--- a/lxd/daemon_integration_test.go
+++ b/lxd/daemon_integration_test.go
@@ -55,7 +55,9 @@ func newDaemon(t *testing.T) (*Daemon, func()) {
 
 // Create a new DaemonConfig object for testing purposes.
 func newConfig() *DaemonConfig {
-	return &DaemonConfig{}
+	return &DaemonConfig{
+		RaftLatency: 0.2,
+	}
 }
 
 // Create a new sys.OS object for testing purposes.
diff --git a/lxd/main_daemon.go b/lxd/main_daemon.go
index 4b0948544..7b9d84372 100644
--- a/lxd/main_daemon.go
+++ b/lxd/main_daemon.go
@@ -38,9 +38,8 @@ func cmdDaemon(args *Args) error {
 		}
 
 	}
-	c := &DaemonConfig{
-		Group: args.Group,
-	}
+	c := DefaultDaemonConfig()
+	c.Group = args.Group
 	d := NewDaemon(c, sys.DefaultOS())
 	err = d.Init()
 	if err != nil {
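
For reference, the resulting construction path, sketched (RaftLatency is
the coarse-grained latency knob introduced above, defaulting to 1.0):

    config := DefaultDaemonConfig()
    config.Group = args.Group
    d := NewDaemon(config, sys.DefaultOS())
    if err := d.Init(); err != nil {
        return err
    }
    defer d.Stop() // also closes the cluster database and the gateway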

From d9e8dab69d05d57c8eaa0b405aaa995e32f6ede1 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 12 Oct 2017 20:12:21 +0000
Subject: [PATCH 008/227] Add V1 cluster schema

This is an initial pass at creating the first version of the cluster
database schema.

A new updateFromV0 patch has been added, which for now only creates a
single table ("nodes") for holding the list of all LXD nodes
participating in the cluster.

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/cluster/open.go        |  9 +++++++++
 lxd/db/cluster/schema.go      | 22 +++++++++++++++++++++
 lxd/db/cluster/update.go      | 46 +++++++++++++++++++++++++++++++++++++++++++
 lxd/db/cluster/update_test.go | 26 ++++++++++++++++++++++++
 lxd/db/db.go                  |  6 +++++-
 5 files changed, 108 insertions(+), 1 deletion(-)
 create mode 100644 lxd/db/cluster/schema.go
 create mode 100644 lxd/db/cluster/update.go
 create mode 100644 lxd/db/cluster/update_test.go

diff --git a/lxd/db/cluster/open.go b/lxd/db/cluster/open.go
index d135dea6f..bf05f8790 100644
--- a/lxd/db/cluster/open.go
+++ b/lxd/db/cluster/open.go
@@ -34,6 +34,15 @@ func Open(name string, dialer grpcsql.Dialer) (*sql.DB, error) {
 	return db, nil
 }
 
+// EnsureSchema applies all relevant schema updates to the cluster database.
+//
+// Return the initial schema version found before starting the update, along
+// with any error that occurred.
+func EnsureSchema(db *sql.DB) (int, error) {
+	schema := Schema()
+	return schema.Ensure(db)
+}
+
 // Generate a new name for the grpcsql driver registration. We need it to be
 // unique for testing, see below.
 func grpcSQLDriverName() string {
diff --git a/lxd/db/cluster/schema.go b/lxd/db/cluster/schema.go
new file mode 100644
index 000000000..90a358e96
--- /dev/null
+++ b/lxd/db/cluster/schema.go
@@ -0,0 +1,22 @@
+package cluster
+
+// DO NOT EDIT BY HAND
+//
+// This code was generated by the schema.DotGo function. If you need to
+// modify the database schema, please add a new schema update to update.go
+// and then run 'make update-schema'.
+const freshSchema = `
+CREATE TABLE nodes (
+    id INTEGER PRIMARY KEY,
+    name TEXT NOT NULL,
+    description TEXT DEFAULT '',
+    address TEXT NOT NULL,
+    schema INTEGER NOT NULL,
+    api_extensions INTEGER NOT NULL,
+    heartbeat DATETIME DEFAULT CURRENT_TIMESTAMP,
+    UNIQUE (name),
+    UNIQUE (address)
+);
+
+INSERT INTO schema (version, updated_at) VALUES (1, strftime("%s"))
+`
diff --git a/lxd/db/cluster/update.go b/lxd/db/cluster/update.go
new file mode 100644
index 000000000..3d43e9b2e
--- /dev/null
+++ b/lxd/db/cluster/update.go
@@ -0,0 +1,46 @@
+package cluster
+
+import (
+	"database/sql"
+
+	"github.com/lxc/lxd/lxd/db/schema"
+)
+
+// Schema for the cluster database.
+func Schema() *schema.Schema {
+	schema := schema.NewFromMap(updates)
+	schema.Fresh(freshSchema)
+	return schema
+}
+
+// SchemaDotGo refreshes the schema.go file in this package, using the updates
+// defined here.
+func SchemaDotGo() error {
+	return schema.DotGo(updates, "schema")
+}
+
+// SchemaVersion is the current version of the cluster database schema.
+var SchemaVersion = len(updates)
+
+var updates = map[int]schema.Update{
+	1: updateFromV0,
+}
+
+func updateFromV0(tx *sql.Tx) error {
+	// v0..v1 the dawn of clustering
+	stmt := `
+CREATE TABLE nodes (
+    id INTEGER PRIMARY KEY,
+    name TEXT NOT NULL,
+    description TEXT DEFAULT '',
+    address TEXT NOT NULL,
+    schema INTEGER NOT NULL,
+    api_extensions INTEGER NOT NULL,
+    heartbeat DATETIME DEFAULT CURRENT_TIMESTAMP,
+    UNIQUE (name),
+    UNIQUE (address)
+);
+`
+	_, err := tx.Exec(stmt)
+	return err
+}
diff --git a/lxd/db/cluster/update_test.go b/lxd/db/cluster/update_test.go
new file mode 100644
index 000000000..c80a51574
--- /dev/null
+++ b/lxd/db/cluster/update_test.go
@@ -0,0 +1,26 @@
+package cluster_test
+
+import (
+	"testing"
+	"time"
+
+	"github.com/lxc/lxd/lxd/db/cluster"
+	"github.com/stretchr/testify/require"
+)
+
+func TestUpdateFromV0(t *testing.T) {
+	schema := cluster.Schema()
+	db, err := schema.ExerciseUpdate(1, nil)
+	require.NoError(t, err)
+
+	_, err = db.Exec("INSERT INTO nodes VALUES (1, 'foo', 'blah', '1.2.3.4:666', 1, 32, ?)", time.Now())
+	require.NoError(t, err)
+
+	// Unique constraint on name
+	_, err = db.Exec("INSERT INTO nodes VALUES (2, 'foo', 'gosh', '5.6.7.8:666', 5, 20, ?)", time.Now())
+	require.Error(t, err)
+
+	// Unique constraint on address
+	_, err = db.Exec("INSERT INTO nodes VALUES (3, 'bar', 'gasp', '1.2.3.4:666', 9, 11), ?)", time.Now())
+	require.Error(t, err)
+}
diff --git a/lxd/db/db.go b/lxd/db/db.go
index 9c6add273..0bc0d0e39 100644
--- a/lxd/db/db.go
+++ b/lxd/db/db.go
@@ -159,7 +159,11 @@ func (c *Cluster) Close() error {
 func UpdateSchemasDotGo() error {
 	err := node.SchemaDotGo()
 	if err != nil {
-		return fmt.Errorf("failed to update local schema.go: %v", err)
+		return fmt.Errorf("failed to update node schema.go: %v", err)
+	}
+	err = cluster.SchemaDotGo()
+	if err != nil {
+		return fmt.Errorf("failed to update cluster schema.go: %v", err)
 	}
 
 	return nil
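
A node row, as the tests in the next patches exercise it, looks like
this (the name and address values are made up):

    stmt := `
    INSERT INTO nodes(name, address, schema, api_extensions)
    VALUES (?, ?, ?, ?)
    `
    _, err := tx.Exec(stmt, "node1", "10.0.0.1:8443",
        cluster.SchemaVersion, len(version.APIExtensions))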

From eced6063387a81c2d871c157b6e91ff8948c4a9c Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Sat, 14 Oct 2017 12:24:49 +0000
Subject: [PATCH 009/227] Wire cluster.EnsureSchema into db.OpenCluster

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/db.go      | 10 +++++++++-
 lxd/db/db_test.go | 32 +++++++++++++++++++-------------
 2 files changed, 28 insertions(+), 14 deletions(-)

diff --git a/lxd/db/db.go b/lxd/db/db.go
index 0bc0d0e39..f07ced010 100644
--- a/lxd/db/db.go
+++ b/lxd/db/db.go
@@ -7,6 +7,7 @@ import (
 
 	grpcsql "github.com/CanonicalLtd/go-grpc-sql"
 	"github.com/mattn/go-sqlite3"
+	"github.com/pkg/errors"
 
 	"github.com/lxc/lxd/lxd/db/cluster"
 	"github.com/lxc/lxd/lxd/db/node"
@@ -129,11 +130,18 @@ type Cluster struct {
 func OpenCluster(name string, dialer grpcsql.Dialer) (*Cluster, error) {
 	db, err := cluster.Open(name, dialer)
 	if err != nil {
-		return nil, err
+		return nil, errors.Wrap(err, "failed to open database")
+	}
+
+	_, err = cluster.EnsureSchema(db)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to ensure schema")
 	}
+
 	cluster := &Cluster{
 		db: db,
 	}
+
 	return cluster, nil
 }
 
diff --git a/lxd/db/db_test.go b/lxd/db/db_test.go
index cf2eeb6df..33b27c003 100644
--- a/lxd/db/db_test.go
+++ b/lxd/db/db_test.go
@@ -4,7 +4,9 @@ import (
 	"testing"
 
 	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/db/query"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
 // Node database objects automatically initialize their schema as needed.
@@ -15,13 +17,14 @@ func TestNode_Schema(t *testing.T) {
 	// The underlying node-level database has exactly one row in the schema
 	// table.
 	db := node.DB()
-	rows, err := db.Query("SELECT COUNT(*) FROM schema")
-	assert.NoError(t, err)
-	defer rows.Close()
-	assert.Equal(t, true, rows.Next())
-	var n int
-	assert.NoError(t, rows.Scan(&n))
+	tx, err := db.Begin()
+	require.NoError(t, err)
+	n, err := query.Count(tx, "schema", "")
+	require.NoError(t, err)
 	assert.Equal(t, 1, n)
+
+	assert.NoError(t, tx.Commit())
+	assert.NoError(t, db.Close())
 }
 
 // A gRPC SQL connection is established when starting to interact with the
@@ -30,12 +33,15 @@ func TestCluster_Setup(t *testing.T) {
 	cluster, cleanup := db.NewTestCluster(t)
 	defer cleanup()
 
+	// The underlying node-level database has exactly one row in the schema
+	// table.
 	db := cluster.DB()
-	rows, err := db.Query("SELECT COUNT(*) FROM sqlite_master")
-	assert.NoError(t, err)
-	defer rows.Close()
-	assert.Equal(t, true, rows.Next())
-	var n uint
-	assert.NoError(t, rows.Scan(&n))
-	assert.Zero(t, n)
+	tx, err := db.Begin()
+	require.NoError(t, err)
+	n, err := query.Count(tx, "schema", "")
+	require.NoError(t, err)
+	assert.Equal(t, 1, n)
+
+	assert.NoError(t, tx.Commit())
+	assert.NoError(t, db.Close())
 }
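
The query.Count helper used here takes a table name, an optional filter
expression and its bound arguments (an empty filter counts all rows):

    n, err := query.Count(tx, "schema", "")                 // all rows
    n, err = query.Count(tx, "nodes", "address=?", address) // filtered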

From ba114946cc2f911feaf0c8134f52654a51858af2 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 13 Oct 2017 10:21:56 +0000
Subject: [PATCH 010/227] Check the versions of other nodes in
 cluster.EnsureSchema

Modify cluster.EnsureSchema to also check that all other nodes in the
cluster have a schema version and an API extensions count that match
this node's.

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/daemon.go                        |   9 +-
 lxd/db/cluster/open.go               | 124 +++++++++++++++++++++++-
 lxd/db/cluster/open_test.go          | 180 +++++++++++++++++++++++++++++++++++
 lxd/db/cluster/query.go              |  50 ++++++++++
 lxd/db/cluster/schema_export_test.go |   3 +
 lxd/db/db.go                         |  16 +++-
 lxd/db/testing.go                    |   2 +-
 7 files changed, 374 insertions(+), 10 deletions(-)
 create mode 100644 lxd/db/cluster/open_test.go
 create mode 100644 lxd/db/cluster/query.go
 create mode 100644 lxd/db/cluster/schema_export_test.go

diff --git a/lxd/daemon.go b/lxd/daemon.go
index d1cbdb9ec..abea2cb4a 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -19,6 +19,7 @@ import (
 	"github.com/gorilla/mux"
 	"github.com/juju/idmclient"
 	_ "github.com/mattn/go-sqlite3"
+	"github.com/pkg/errors"
 	"golang.org/x/net/context"
 	"gopkg.in/macaroon-bakery.v2/bakery"
 	"gopkg.in/macaroon-bakery.v2/bakery/checkers"
@@ -399,11 +400,13 @@ func (d *Daemon) init() error {
 		}
 	}
 
+	address := daemonConfig["core.https_address"].Get()
+
 	/* Open the cluster database */
 	clusterFilename := filepath.Join(d.os.VarDir, "db.bin")
-	d.cluster, err = db.OpenCluster(clusterFilename, d.gateway.Dialer())
+	d.cluster, err = db.OpenCluster(clusterFilename, d.gateway.Dialer(), address)
 	if err != nil {
-		return err
+		return errors.Wrap(err, "failed to open cluster database")
 	}
 
 	/* Read the storage pools */
@@ -466,7 +469,7 @@ func (d *Daemon) init() error {
 		RestServer:           RestServer(d),
 		DevLxdServer:         DevLxdServer(d),
 		LocalUnixSocketGroup: d.config.Group,
-		NetworkAddress:       daemonConfig["core.https_address"].Get(),
+		NetworkAddress:       address,
 	}
 	d.endpoints, err = endpoints.Up(config)
 	if err != nil {
diff --git a/lxd/db/cluster/open.go b/lxd/db/cluster/open.go
index bf05f8790..f9b3139e7 100644
--- a/lxd/db/cluster/open.go
+++ b/lxd/db/cluster/open.go
@@ -6,6 +6,9 @@ import (
 	"sync/atomic"
 
 	"github.com/CanonicalLtd/go-grpc-sql"
+	"github.com/lxc/lxd/lxd/db/schema"
+	"github.com/lxc/lxd/shared/version"
+	"github.com/pkg/errors"
 )
 
 // Open the cluster database object.
@@ -36,11 +39,58 @@ func Open(name string, dialer grpcsql.Dialer) (*sql.DB, error) {
 
 // EnsureSchema applies all relevant schema updates to the cluster database.
 //
-// Return the initial schema version found before starting the update, along
-// with any error that occurred.
-func EnsureSchema(db *sql.DB) (int, error) {
+// Before actually doing anything, this function will make sure that all nodes
+// in the cluster have a schema version and a number of API extensions that
+// match ours. If that's not the case, we either return an error (if some
+// nodes have a version greater than ours and we need to be upgraded), or
+// return false and no error (if some nodes have a lower version, and we need
+// to wait until they get upgraded and restarted).
+func EnsureSchema(db *sql.DB, address string) (bool, error) {
+	someNodesAreBehind := false
+	apiExtensions := len(version.APIExtensions)
+
+	check := func(current int, tx *sql.Tx) error {
+		// If we're bootstrapping a fresh schema, skip any check, since
+		// it's safe to assume we are the only node.
+		if current == 0 {
+			return nil
+		}
+
+		// Check if we're clustered
+		n, err := selectNodesCount(tx)
+		if err != nil {
+			return errors.Wrap(err, "failed to fetch current nodes count")
+		}
+		if n == 0 {
+			return nil // Nothing to do.
+		}
+
+		// Update the schema and api_extension columns of ourselves.
+		err = updateNodeVersion(tx, address, apiExtensions)
+		if err != nil {
+			return errors.Wrap(err, "failed to update node version")
+
+		}
+
+		err = checkClusterIsUpgradable(tx, [2]int{len(updates), apiExtensions})
+		if err == errSomeNodesAreBehind {
+			someNodesAreBehind = true
+			return schema.ErrGracefulAbort
+		}
+		return err
+	}
+
 	schema := Schema()
-	return schema.Ensure(db)
+	schema.Check(check)
+
+	_, err := schema.Ensure(db)
+	if someNodesAreBehind {
+		return false, nil
+	}
+	if err != nil {
+		return false, err
+	}
+	return true, err
 }
 
 // Generate a new name for the grpcsql driver registration. We need it to be
@@ -55,3 +105,69 @@ func grpcSQLDriverName() string {
 // to unregister drivers, and in unit tests more than one driver gets
 // registered.
 var grpcSQLDriverSerial uint64
+
+func checkClusterIsUpgradable(tx *sql.Tx, target [2]int) error {
+	// Get the current versions in the nodes table.
+	versions, err := selectNodesVersions(tx)
+	if err != nil {
+		return errors.Wrap(err, "failed to fetch current nodes versions")
+	}
+
+	for _, version := range versions {
+		n, err := compareVersions(target, version)
+		if err != nil {
+			return err
+		}
+		switch n {
+		case 0:
+			// Versions are equal, there's hope for the
+			// update. Let's check the next node.
+			continue
+		case 1:
+			// Our version is bigger, we should stop here
+			// and wait for other nodes to be upgraded and
+			// restarted.
+			return errSomeNodesAreBehind
+		case 2:
+			// Another node has a version greater than ours
+			// and presumably is waiting for other nodes
+			// to upgrade. Let's error out and shut down
+			// since we need a greater version.
+			return fmt.Errorf("this node's version is behind, please upgrade")
+		default:
+			// Sanity.
+			panic("unexpected return value from compareVersions")
+		}
+	}
+	return nil
+}
+
+// Compare two nodes versions.
+//
+// A version consists of the version of the node's schema and the number of API
+// extensions it supports.
+//
+// Return 0 if they are equal, 1 if the first version is greater than the second
+// and 2 if the second is greater than the first.
+//
+// Return an error if inconsistent versions are detected, for example the first
+// node's schema is greater than the second's, but the number of extensions is
+// smaller.
+func compareVersions(version1, version2 [2]int) (int, error) {
+	schema1, extensions1 := version1[0], version1[1]
+	schema2, extensions2 := version2[0], version2[1]
+
+	if schema1 == schema2 && extensions1 == extensions2 {
+		return 0, nil
+	}
+	if schema1 >= schema2 && extensions1 >= extensions2 {
+		return 1, nil
+	}
+	if schema1 <= schema2 && extensions1 <= extensions2 {
+		return 2, nil
+	}
+
+	return -1, fmt.Errorf("nodes have inconsistent versions")
+}
+
+var errSomeNodesAreBehind = fmt.Errorf("some nodes are behind this node's version")
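
To make the compareVersions semantics concrete, a few illustrative calls
(the version pairs are made up):

    compareVersions([2]int{37, 80}, [2]int{37, 80}) // 0: equal, check next node
    compareVersions([2]int{37, 80}, [2]int{36, 80}) // 1: we are ahead, wait for others
    compareVersions([2]int{37, 80}, [2]int{38, 81}) // 2: we are behind, must upgrade
    compareVersions([2]int{37, 80}, [2]int{38, 79}) // error: inconsistent versions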
diff --git a/lxd/db/cluster/open_test.go b/lxd/db/cluster/open_test.go
new file mode 100644
index 000000000..f858d7b35
--- /dev/null
+++ b/lxd/db/cluster/open_test.go
@@ -0,0 +1,180 @@
+package cluster_test
+
+import (
+	"database/sql"
+	"fmt"
+	"testing"
+
+	"github.com/lxc/lxd/lxd/db/cluster"
+	"github.com/lxc/lxd/lxd/db/query"
+	"github.com/lxc/lxd/shared/version"
+	"github.com/mpvl/subtest"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// If the node is not clustered, the schema update works normally.
+func TestEnsureSchema_NoClustered(t *testing.T) {
+	db := newDB(t)
+	ready, err := cluster.EnsureSchema(db, "1.2.3.4:666")
+	assert.True(t, ready)
+	assert.NoError(t, err)
+}
+
+// Exercise EnsureSchema failures when the cluster can't be upgraded right now.
+func TestEnsureSchema_ClusterNotUpgradable(t *testing.T) {
+	schema := cluster.SchemaVersion
+	apiExtensions := len(version.APIExtensions)
+
+	cases := []struct {
+		title string
+		setup func(*testing.T, *sql.DB)
+		ready bool
+		error string
+	}{
+		{
+			`a node's schema version is behind`,
+			func(t *testing.T, db *sql.DB) {
+				addNode(t, db, "1", schema, apiExtensions)
+				addNode(t, db, "2", schema-1, apiExtensions)
+			},
+			false, // The schema was not updated
+			"",    // No error is returned
+		},
+		{
+			`a node's number of API extensions is behind`,
+			func(t *testing.T, db *sql.DB) {
+				addNode(t, db, "1", schema, apiExtensions)
+				addNode(t, db, "2", schema, apiExtensions-1)
+			},
+			false, // The schema was not updated
+			"",    // No error is returned
+		},
+		{
+			`this node's schema is behind`,
+			func(t *testing.T, db *sql.DB) {
+				addNode(t, db, "1", schema, apiExtensions)
+				addNode(t, db, "2", schema+1, apiExtensions)
+			},
+			false,
+			"this node's version is behind, please upgrade",
+		},
+		{
+			`this node's number of API extensions is behind`,
+			func(t *testing.T, db *sql.DB) {
+				addNode(t, db, "1", schema, apiExtensions)
+				addNode(t, db, "2", schema, apiExtensions+1)
+			},
+			false,
+			"this node's version is behind, please upgrade",
+		},
+		{
+			`inconsistent schema version and API extensions number`,
+			func(t *testing.T, db *sql.DB) {
+				addNode(t, db, "1", schema, apiExtensions)
+				addNode(t, db, "2", schema+1, apiExtensions-1)
+			},
+			false,
+			"nodes have inconsistent versions",
+		},
+	}
+	for _, c := range cases {
+		subtest.Run(t, c.title, func(t *testing.T) {
+			db := newDB(t)
+			c.setup(t, db)
+			ready, err := cluster.EnsureSchema(db, "1")
+			assert.Equal(t, c.ready, ready)
+			if c.error == "" {
+				assert.NoError(t, err)
+			} else {
+				assert.EqualError(t, err, c.error)
+			}
+		})
+	}
+}
+
+// Regardless of whether the schema could actually be upgraded or not, the
+// version of this node gets updated.
+func TestEnsureSchema_UpdateNodeVersion(t *testing.T) {
+	schema := cluster.SchemaVersion
+	apiExtensions := len(version.APIExtensions)
+
+	cases := []struct {
+		setup func(*testing.T, *sql.DB)
+		ready bool
+	}{
+		{
+			func(t *testing.T, db *sql.DB) {},
+			true,
+		},
+		{
+			func(t *testing.T, db *sql.DB) {
+				// Add a node which is behind.
+				addNode(t, db, "2", schema, apiExtensions-1)
+			},
+			true,
+		},
+	}
+	for _, c := range cases {
+		subtest.Run(t, fmt.Sprintf("%v", c.ready), func(t *testing.T) {
+			db := newDB(t)
+
+			// Add ourselves with an older schema version and API
+			// extensions number.
+			addNode(t, db, "1", schema-1, apiExtensions-1)
+
+			// Ensure the schema.
+			ready, err := cluster.EnsureSchema(db, "1")
+			assert.NoError(t, err)
+			assert.Equal(t, c.ready, ready)
+
+			// Check that the nodes table was updated with our new
+			// schema version and API extensions number.
+			assertNode(t, db, "1", schema, apiExtensions)
+		})
+	}
+}
+
+// Create a new in-memory SQLite database with a fresh cluster schema.
+func newDB(t *testing.T) *sql.DB {
+	db, err := sql.Open("sqlite3", ":memory:")
+	require.NoError(t, err)
+
+	createTableSchema := `
+CREATE TABLE schema (
+    id         INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    version    INTEGER NOT NULL,
+    updated_at DATETIME NOT NULL,
+    UNIQUE (version)
+);
+`
+	_, err = db.Exec(createTableSchema + cluster.FreshSchema)
+	require.NoError(t, err)
+
+	return db
+}
+
+// Add a new node with the given address, schema version and number of API extensions.
+func addNode(t *testing.T, db *sql.DB, address string, schema int, apiExtensions int) {
+	err := query.Transaction(db, func(tx *sql.Tx) error {
+		stmt := `
+INSERT INTO nodes(name, address, schema, api_extensions) VALUES (?, ?, ?, ?)
+`
+		name := fmt.Sprintf("node at %s", address)
+		_, err := tx.Exec(stmt, name, address, schema, apiExtensions)
+		return err
+	})
+	require.NoError(t, err)
+}
+
+// Assert that the node with the given address has the given schema version and API
+// extensions number.
+func assertNode(t *testing.T, db *sql.DB, address string, schema int, apiExtensions int) {
+	err := query.Transaction(db, func(tx *sql.Tx) error {
+		where := "address=? AND schema=? AND api_extensions=?"
+		n, err := query.Count(tx, "nodes", where, address, schema, apiExtensions)
+		assert.Equal(t, 1, n, "node does not have expected version")
+		return err
+	})
+	require.NoError(t, err)
+}
diff --git a/lxd/db/cluster/query.go b/lxd/db/cluster/query.go
new file mode 100644
index 000000000..286ffe2db
--- /dev/null
+++ b/lxd/db/cluster/query.go
@@ -0,0 +1,50 @@
+package cluster
+
+import (
+	"database/sql"
+	"fmt"
+
+	"github.com/lxc/lxd/lxd/db/query"
+)
+
+// Update the schema and api_extensions columns of the row in the nodes table
+// that matches the given address.
+//
+// If no such row is found, an error is returned.
+func updateNodeVersion(tx *sql.Tx, address string, apiExtensions int) error {
+	stmt := "UPDATE nodes SET schema=?, api_extensions=? WHERE address=?"
+	result, err := tx.Exec(stmt, len(updates), apiExtensions, address)
+	if err != nil {
+		return err
+	}
+	n, err := result.RowsAffected()
+	if err != nil {
+		return err
+	}
+	if n != 1 {
+		return fmt.Errorf("updated %d rows instead of 1", n)
+	}
+	return nil
+}
+
+// Return the number of rows in the nodes table.
+func selectNodesCount(tx *sql.Tx) (int, error) {
+	return query.Count(tx, "nodes", "")
+}
+
+// Return a slice of integer pairs. Each pair contains the schema version and
+// the number of API extensions of a node in the cluster.
+func selectNodesVersions(tx *sql.Tx) ([][2]int, error) {
+	versions := [][2]int{}
+
+	dest := func(i int) []interface{} {
+		versions = append(versions, [2]int{})
+		return []interface{}{&versions[i][0], &versions[i][1]}
+	}
+
+	err := query.SelectObjects(tx, dest, "SELECT schema, api_extensions FROM nodes")
+	if err != nil {
+		return nil, err
+	}
+	return versions, nil
+}
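
The dest closure above follows the query.SelectObjects contract: it is called
once per row with the row index, must grow the result slice by one element,
and returns the scan destinations for that row's columns. A minimal sketch of
the same pattern against a hypothetical (id, name) query:

	type nodeRow struct {
		ID   int
		Name string
	}
	rows := []nodeRow{}
	dest := func(i int) []interface{} {
		rows = append(rows, nodeRow{})
		return []interface{}{&rows[i].ID, &rows[i].Name}
	}
	err := query.SelectObjects(tx, dest, "SELECT id, name FROM nodes")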
diff --git a/lxd/db/cluster/schema_export_test.go b/lxd/db/cluster/schema_export_test.go
new file mode 100644
index 000000000..d2041016a
--- /dev/null
+++ b/lxd/db/cluster/schema_export_test.go
@@ -0,0 +1,3 @@
+package cluster
+
+var FreshSchema = freshSchema
diff --git a/lxd/db/db.go b/lxd/db/db.go
index f07ced010..6b4a49b6d 100644
--- a/lxd/db/db.go
+++ b/lxd/db/db.go
@@ -28,6 +28,8 @@ var (
 	 * already do.
 	 */
 	NoSuchObjectError = fmt.Errorf("No such object")
+
+	Upgrading = fmt.Errorf("The cluster database is upgrading")
 )
 
 // Node mediates access to LXD's data stored in the node-local SQLite database.
@@ -127,13 +129,23 @@ type Cluster struct {
 
 // OpenCluster creates a new Cluster object for interacting with the dqlite
 // database.
-func OpenCluster(name string, dialer grpcsql.Dialer) (*Cluster, error) {
+//
+// - name: Basename of the database file holding the data. Typically "db.bin".
+// - dialer: Function used to connect to the dqlite backend via gRPC SQL.
+// - address: Network address of this node (or empty string).
+//
+// The address parameter will be used to determine if the cluster
+// database matches our version, and possibly trigger a schema update. If the
+// schema update can't be performed right now, because some nodes are still
+// behind, an Upgrading error is returned.
+func OpenCluster(name string, dialer grpcsql.Dialer, address string) (*Cluster, error) {
 	db, err := cluster.Open(name, dialer)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to open database")
 	}
 
-	_, err = cluster.EnsureSchema(db)
+	_, err = cluster.EnsureSchema(db, address)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to ensure schema")
 	}
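
Callers are expected to special-case the Upgrading sentinel rather than treat
it as fatal. A hedged sketch of what that could look like (hypothetical
wiring, and assuming the sentinel is propagated unwrapped;
waitForClusterUpgrade is a made-up helper):

	cluster, err := db.OpenCluster("db.bin", dialer, address)
	if err == db.Upgrading {
		// Some nodes are still behind: keep the daemon running so the
		// operator can upgrade them, but don't serve the regular API.
		return waitForClusterUpgrade()
	}
	if err != nil {
		return err
	}
	_ = cluster // from here on the cluster handle is used as usual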
diff --git a/lxd/db/testing.go b/lxd/db/testing.go
index 1cb6344d3..65c5ddcae 100644
--- a/lxd/db/testing.go
+++ b/lxd/db/testing.go
@@ -56,7 +56,7 @@ func NewTestCluster(t *testing.T) (*Cluster, func()) {
 	// Create an in-memory gRPC SQL server and dialer.
 	server, dialer := newGrpcServer()
 
-	cluster, err := OpenCluster(":memory:", dialer)
+	cluster, err := OpenCluster(":memory:", dialer, "1")
 	require.NoError(t, err)
 
 	cleanup := func() {

From 917abbac92680f3d2075cbe61fdeb14d4061bc96 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 12 Oct 2017 16:18:14 +0000
Subject: [PATCH 011/227] Rename State.DB to State.Node and add State.Cluster

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/container.go             | 34 +++++++++++++++++-----------------
 lxd/container_lxc.go         | 18 +++++++++---------
 lxd/containers.go            |  8 ++++----
 lxd/containers_get.go        |  2 +-
 lxd/daemon.go                |  2 +-
 lxd/devices.go               |  8 ++++----
 lxd/logging.go               |  2 +-
 lxd/networks.go              |  8 ++++----
 lxd/networks_utils.go        |  4 ++--
 lxd/profiles.go              |  6 +++---
 lxd/state/state.go           | 16 +++++++++-------
 lxd/storage.go               | 28 ++++++++++++++--------------
 lxd/storage_ceph.go          |  2 +-
 lxd/storage_lvm_utils.go     |  6 +++---
 lxd/storage_pools_utils.go   | 10 +++++-----
 lxd/storage_volumes_utils.go | 16 ++++++++--------
 16 files changed, 86 insertions(+), 84 deletions(-)

diff --git a/lxd/container.go b/lxd/container.go
index ca01006bd..1a75e0a67 100644
--- a/lxd/container.go
+++ b/lxd/container.go
@@ -596,7 +596,7 @@ func containerCreateEmptySnapshot(s *state.State, args db.ContainerArgs) (contai
 	// Now create the empty snapshot
 	err = c.Storage().ContainerSnapshotCreateEmpty(c)
 	if err != nil {
-		s.DB.ContainerRemove(args.Name)
+		s.Node.ContainerRemove(args.Name)
 		return nil, err
 	}
 
@@ -605,7 +605,7 @@ func containerCreateEmptySnapshot(s *state.State, args db.ContainerArgs) (contai
 
 func containerCreateFromImage(s *state.State, args db.ContainerArgs, hash string) (container, error) {
 	// Get the image properties
-	_, img, err := s.DB.ImageGet(hash, false, false)
+	_, img, err := s.Node.ImageGet(hash, false, false)
 	if err != nil {
 		return nil, err
 	}
@@ -626,16 +626,16 @@ func containerCreateFromImage(s *state.State, args db.ContainerArgs, hash string
 		return nil, err
 	}
 
-	err = s.DB.ImageLastAccessUpdate(hash, time.Now().UTC())
+	err = s.Node.ImageLastAccessUpdate(hash, time.Now().UTC())
 	if err != nil {
-		s.DB.ContainerRemove(args.Name)
+		s.Node.ContainerRemove(args.Name)
 		return nil, fmt.Errorf("Error updating image last use date: %s", err)
 	}
 
 	// Now create the storage from an image
 	err = c.Storage().ContainerCreateFromImage(c, hash)
 	if err != nil {
-		s.DB.ContainerRemove(args.Name)
+		s.Node.ContainerRemove(args.Name)
 		return nil, err
 	}
 
@@ -660,7 +660,7 @@ func containerCreateAsCopy(s *state.State, args db.ContainerArgs, sourceContaine
 	if !containerOnly {
 		snapshots, err := sourceContainer.Snapshots()
 		if err != nil {
-			s.DB.ContainerRemove(args.Name)
+			s.Node.ContainerRemove(args.Name)
 			return nil, err
 		}
 
@@ -692,9 +692,9 @@ func containerCreateAsCopy(s *state.State, args db.ContainerArgs, sourceContaine
 	err = ct.Storage().ContainerCopy(ct, sourceContainer, containerOnly)
 	if err != nil {
 		for _, v := range csList {
-			s.DB.ContainerRemove((*v).Name())
+			s.Node.ContainerRemove((*v).Name())
 		}
-		s.DB.ContainerRemove(args.Name)
+		s.Node.ContainerRemove(args.Name)
 		return nil, err
 	}
 
@@ -773,7 +773,7 @@ func containerCreateAsSnapshot(s *state.State, args db.ContainerArgs, sourceCont
 	// Clone the container
 	err = sourceContainer.Storage().ContainerSnapshotCreate(c, sourceContainer)
 	if err != nil {
-		s.DB.ContainerRemove(args.Name)
+		s.Node.ContainerRemove(args.Name)
 		return nil, err
 	}
 
@@ -836,7 +836,7 @@ func containerCreateInternal(s *state.State, args db.ContainerArgs) (container,
 	}
 
 	// Validate container devices
-	err = containerValidDevices(s.DB, args.Devices, false, false)
+	err = containerValidDevices(s.Node, args.Devices, false, false)
 	if err != nil {
 		return nil, err
 	}
@@ -852,7 +852,7 @@ func containerCreateInternal(s *state.State, args db.ContainerArgs) (container,
 	}
 
 	// Validate profiles
-	profiles, err := s.DB.Profiles()
+	profiles, err := s.Node.Profiles()
 	if err != nil {
 		return nil, err
 	}
@@ -864,7 +864,7 @@ func containerCreateInternal(s *state.State, args db.ContainerArgs) (container,
 	}
 
 	// Create the container entry
-	id, err := s.DB.ContainerCreate(args)
+	id, err := s.Node.ContainerCreate(args)
 	if err != nil {
 		if err == db.DbErrAlreadyDefined {
 			thing := "Container"
@@ -882,9 +882,9 @@ func containerCreateInternal(s *state.State, args db.ContainerArgs) (container,
 	args.Id = id
 
 	// Read the timestamp from the database
-	dbArgs, err := s.DB.ContainerGet(args.Name)
+	dbArgs, err := s.Node.ContainerGet(args.Name)
 	if err != nil {
-		s.DB.ContainerRemove(args.Name)
+		s.Node.ContainerRemove(args.Name)
 		return nil, err
 	}
 	args.CreationDate = dbArgs.CreationDate
@@ -893,7 +893,7 @@ func containerCreateInternal(s *state.State, args db.ContainerArgs) (container,
 	// Setup the container struct and finish creation (storage and idmap)
 	c, err := containerLXCCreate(s, args)
 	if err != nil {
-		s.DB.ContainerRemove(args.Name)
+		s.Node.ContainerRemove(args.Name)
 		return nil, err
 	}
 
@@ -948,7 +948,7 @@ func containerConfigureInternal(c container) error {
 
 func containerLoadById(s *state.State, id int) (container, error) {
 	// Get the DB record
-	name, err := s.DB.ContainerName(id)
+	name, err := s.Node.ContainerName(id)
 	if err != nil {
 		return nil, err
 	}
@@ -958,7 +958,7 @@ func containerLoadById(s *state.State, id int) (container, error) {
 
 func containerLoadByName(s *state.State, name string) (container, error) {
 	// Get the DB record
-	args, err := s.DB.ContainerGet(name)
+	args, err := s.Node.ContainerGet(name)
 	if err != nil {
 		return nil, err
 	}
diff --git a/lxd/container_lxc.go b/lxd/container_lxc.go
index 16c461ef0..adb5856a7 100644
--- a/lxd/container_lxc.go
+++ b/lxd/container_lxc.go
@@ -271,7 +271,7 @@ func containerLXCCreate(s *state.State, args db.ContainerArgs) (container, error
 	// Create the container struct
 	c := &containerLXC{
 		state:        s,
-		db:           s.DB,
+		db:           s.Node,
 		id:           args.Id,
 		name:         args.Name,
 		description:  args.Description,
@@ -307,7 +307,7 @@ func containerLXCCreate(s *state.State, args db.ContainerArgs) (container, error
 		return nil, err
 	}
 
-	err = containerValidDevices(s.DB, c.expandedDevices, false, true)
+	err = containerValidDevices(s.Node, c.expandedDevices, false, true)
 	if err != nil {
 		c.Delete()
 		logger.Error("Failed creating container", ctxMap)
@@ -329,7 +329,7 @@ func containerLXCCreate(s *state.State, args db.ContainerArgs) (container, error
 	storagePool := rootDiskDevice["pool"]
 
 	// Get the storage pool ID for the container
-	poolID, pool, err := s.DB.StoragePoolGet(storagePool)
+	poolID, pool, err := s.Node.StoragePoolGet(storagePool)
 	if err != nil {
 		c.Delete()
 		return nil, err
@@ -343,7 +343,7 @@ func containerLXCCreate(s *state.State, args db.ContainerArgs) (container, error
 	}
 
 	// Create a new database entry for the container's storage volume
-	_, err = s.DB.StoragePoolVolumeCreate(args.Name, "", storagePoolVolumeTypeContainer, poolID, volumeConfig)
+	_, err = s.Node.StoragePoolVolumeCreate(args.Name, "", storagePoolVolumeTypeContainer, poolID, volumeConfig)
 	if err != nil {
 		c.Delete()
 		return nil, err
@@ -353,7 +353,7 @@ func containerLXCCreate(s *state.State, args db.ContainerArgs) (container, error
 	cStorage, err := storagePoolVolumeContainerCreateInit(s, storagePool, args.Name)
 	if err != nil {
 		c.Delete()
-		s.DB.StoragePoolVolumeDelete(args.Name, storagePoolVolumeTypeContainer, poolID)
+		s.Node.StoragePoolVolumeDelete(args.Name, storagePoolVolumeTypeContainer, poolID)
 		logger.Error("Failed to initialize container storage", ctxMap)
 		return nil, err
 	}
@@ -447,7 +447,7 @@ func containerLXCLoad(s *state.State, args db.ContainerArgs) (container, error)
 	// Create the container struct
 	c := &containerLXC{
 		state:        s,
-		db:           s.DB,
+		db:           s.Node,
 		id:           args.Id,
 		name:         args.Name,
 		description:  args.Description,
@@ -733,7 +733,7 @@ func findIdmap(state *state.State, cName string, isolatedStr string, configBase
 	idmapLock.Lock()
 	defer idmapLock.Unlock()
 
-	cs, err := state.DB.ContainersList(db.CTypeRegular)
+	cs, err := state.Node.ContainersList(db.CTypeRegular)
 	if err != nil {
 		return nil, 0, err
 	}
@@ -3380,12 +3380,12 @@ func writeBackupFile(c container) error {
 	}
 
 	s := c.DaemonState()
-	poolID, pool, err := s.DB.StoragePoolGet(poolName)
+	poolID, pool, err := s.Node.StoragePoolGet(poolName)
 	if err != nil {
 		return err
 	}
 
-	_, volume, err := s.DB.StoragePoolVolumeGetType(c.Name(), storagePoolVolumeTypeContainer, poolID)
+	_, volume, err := s.Node.StoragePoolVolumeGetType(c.Name(), storagePoolVolumeTypeContainer, poolID)
 	if err != nil {
 		return err
 	}
diff --git a/lxd/containers.go b/lxd/containers.go
index 800ff5f24..2b884f270 100644
--- a/lxd/containers.go
+++ b/lxd/containers.go
@@ -106,7 +106,7 @@ func (slice containerAutostartList) Swap(i, j int) {
 
 func containersRestart(s *state.State) error {
 	// Get all the containers
-	result, err := s.DB.ContainersList(db.CTypeRegular)
+	result, err := s.Node.ContainersList(db.CTypeRegular)
 	if err != nil {
 		return err
 	}
@@ -179,7 +179,7 @@ func containersShutdown(s *state.State) error {
 	var wg sync.WaitGroup
 
 	// Get all the containers
-	results, err := s.DB.ContainersList(db.CTypeRegular)
+	results, err := s.Node.ContainersList(db.CTypeRegular)
 	if err != nil {
 		return err
 	}
@@ -198,7 +198,7 @@ func containersShutdown(s *state.State) error {
 	sort.Sort(containerStopList(containers))
 
 	// Reset all container states
-	err = s.DB.ContainersResetState()
+	err = s.Node.ContainersResetState()
 	if err != nil {
 		return err
 	}
@@ -256,7 +256,7 @@ func containerDeleteSnapshots(s *state.State, cname string) error {
 	logger.Debug("containerDeleteSnapshots",
 		log.Ctx{"container": cname})
 
-	results, err := s.DB.ContainerGetSnapshots(cname)
+	results, err := s.Node.ContainerGetSnapshots(cname)
 	if err != nil {
 		return err
 	}
diff --git a/lxd/containers_get.go b/lxd/containers_get.go
index b86dbb336..9ae37928b 100644
--- a/lxd/containers_get.go
+++ b/lxd/containers_get.go
@@ -34,7 +34,7 @@ func containersGet(d *Daemon, r *http.Request) Response {
 }
 
 func doContainersGet(s *state.State, recursion bool) (interface{}, error) {
-	result, err := s.DB.ContainersList(db.CTypeRegular)
+	result, err := s.Node.ContainersList(db.CTypeRegular)
 	if err != nil {
 		return nil, err
 	}
diff --git a/lxd/daemon.go b/lxd/daemon.go
index abea2cb4a..59468c3e4 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -186,7 +186,7 @@ func isJSONRequest(r *http.Request) bool {
 
 // State creates a new State instance linked to our internal db and os.
 func (d *Daemon) State() *state.State {
-	return state.NewState(d.db, d.maas, d.os)
+	return state.NewState(d.db, d.cluster, d.maas, d.os)
 }
 
 // UnixSocket returns the full path to the unix.socket file that this daemon is
diff --git a/lxd/devices.go b/lxd/devices.go
index ec11a7de9..6a8f341f7 100644
--- a/lxd/devices.go
+++ b/lxd/devices.go
@@ -604,7 +604,7 @@ func deviceTaskBalance(s *state.State) {
 	}
 
 	// Iterate through the containers
-	containers, err := s.DB.ContainersList(db.CTypeRegular)
+	containers, err := s.Node.ContainersList(db.CTypeRegular)
 	if err != nil {
 		logger.Error("problem loading containers list", log.Ctx{"err": err})
 		return
@@ -730,7 +730,7 @@ func deviceNetworkPriority(s *state.State, netif string) {
 		return
 	}
 
-	containers, err := s.DB.ContainersList(db.CTypeRegular)
+	containers, err := s.Node.ContainersList(db.CTypeRegular)
 	if err != nil {
 		return
 	}
@@ -761,7 +761,7 @@ func deviceNetworkPriority(s *state.State, netif string) {
 }
 
 func deviceUSBEvent(s *state.State, usb usbDevice) {
-	containers, err := s.DB.ContainersList(db.CTypeRegular)
+	containers, err := s.Node.ContainersList(db.CTypeRegular)
 	if err != nil {
 		logger.Error("problem loading containers list", log.Ctx{"err": err})
 		return
@@ -847,7 +847,7 @@ func deviceEventListener(s *state.State) {
 
 			logger.Debugf("Scheduler: network: %s has been added: updating network priorities", e[0])
 			deviceNetworkPriority(s, e[0])
-			networkAutoAttach(s.DB, e[0])
+			networkAutoAttach(s.Node, e[0])
 		case e := <-chUSB:
 			deviceUSBEvent(s, e)
 		case e := <-deviceSchedRebalance:
diff --git a/lxd/logging.go b/lxd/logging.go
index 6587149cd..8a0856f13 100644
--- a/lxd/logging.go
+++ b/lxd/logging.go
@@ -41,7 +41,7 @@ func expireLogs(ctx context.Context, state *state.State) error {
 	var containers []string
 	ch := make(chan struct{})
 	go func() {
-		containers, err = state.DB.ContainersList(db.CTypeRegular)
+		containers, err = state.Node.ContainersList(db.CTypeRegular)
 		ch <- struct{}{}
 	}()
 	select {
diff --git a/lxd/networks.go b/lxd/networks.go
index 9bab7a1fe..8a27a8b88 100644
--- a/lxd/networks.go
+++ b/lxd/networks.go
@@ -399,19 +399,19 @@ var networkCmd = Command{name: "networks/{name}", get: networkGet, delete: netwo
 
 // The network structs and functions
 func networkLoadByName(s *state.State, name string) (*network, error) {
-	id, dbInfo, err := s.DB.NetworkGet(name)
+	id, dbInfo, err := s.Node.NetworkGet(name)
 	if err != nil {
 		return nil, err
 	}
 
-	n := network{db: s.DB, state: s, id: id, name: name, description: dbInfo.Description, config: dbInfo.Config}
+	n := network{db: s.Node, state: s, id: id, name: name, description: dbInfo.Description, config: dbInfo.Config}
 
 	return &n, nil
 }
 
 func networkStartup(s *state.State) error {
 	// Get a list of managed networks
-	networks, err := s.DB.Networks()
+	networks, err := s.Node.Networks()
 	if err != nil {
 		return err
 	}
@@ -435,7 +435,7 @@ func networkStartup(s *state.State) error {
 
 func networkShutdown(s *state.State) error {
 	// Get a list of managed networks
-	networks, err := s.DB.Networks()
+	networks, err := s.Node.Networks()
 	if err != nil {
 		return err
 	}
diff --git a/lxd/networks_utils.go b/lxd/networks_utils.go
index 20b9b9024..d10b4b00e 100644
--- a/lxd/networks_utils.go
+++ b/lxd/networks_utils.go
@@ -744,7 +744,7 @@ func networkUpdateStatic(s *state.State, networkName string) error {
 	defer networkStaticLock.Unlock()
 
 	// Get all the containers
-	containers, err := s.DB.ContainersList(db.CTypeRegular)
+	containers, err := s.Node.ContainersList(db.CTypeRegular)
 	if err != nil {
 		return err
 	}
@@ -753,7 +753,7 @@ func networkUpdateStatic(s *state.State, networkName string) error {
 	var networks []string
 	if networkName == "" {
 		var err error
-		networks, err = s.DB.Networks()
+		networks, err = s.Node.Networks()
 		if err != nil {
 			return err
 		}
diff --git a/lxd/profiles.go b/lxd/profiles.go
index 6b36e2203..e92a08034 100644
--- a/lxd/profiles.go
+++ b/lxd/profiles.go
@@ -105,12 +105,12 @@ var profilesCmd = Command{
 	post: profilesPost}
 
 func doProfileGet(s *state.State, name string) (*api.Profile, error) {
-	_, profile, err := s.DB.ProfileGet(name)
+	_, profile, err := s.Node.ProfileGet(name)
 	if err != nil {
 		return nil, err
 	}
 
-	cts, err := s.DB.ProfileContainersGet(name)
+	cts, err := s.Node.ProfileContainersGet(name)
 	if err != nil {
 		return nil, err
 	}
@@ -139,7 +139,7 @@ func profileGet(d *Daemon, r *http.Request) Response {
 func getContainersWithProfile(s *state.State, profile string) []container {
 	results := []container{}
 
-	output, err := s.DB.ProfileContainersGet(profile)
+	output, err := s.Node.ProfileContainersGet(profile)
 	if err != nil {
 		return results
 	}
diff --git a/lxd/state/state.go b/lxd/state/state.go
index aad9d22b5..10bd8fbf8 100644
--- a/lxd/state/state.go
+++ b/lxd/state/state.go
@@ -10,17 +10,19 @@ import (
 // and the operating system. It's typically used by model entities such as
 // containers, volumes, etc. in order to perform changes.
 type State struct {
-	DB   *db.Node
-	MAAS *maas.Controller
-	OS   *sys.OS
+	Node    *db.Node
+	Cluster *db.Cluster
+	MAAS    *maas.Controller
+	OS      *sys.OS
 }
 
 // NewState returns a new State object with the given database and operating
 // system components.
-func NewState(db *db.Node, maas *maas.Controller, os *sys.OS) *State {
+func NewState(node *db.Node, cluster *db.Cluster, maas *maas.Controller, os *sys.OS) *State {
 	return &State{
-		DB:   db,
-		MAAS: maas,
-		OS:   os,
+		Node:    node,
+		Cluster: cluster,
+		MAAS:    maas,
+		OS:      os,
 	}
 }
diff --git a/lxd/storage.go b/lxd/storage.go
index 4a651d70e..d6c697063 100644
--- a/lxd/storage.go
+++ b/lxd/storage.go
@@ -283,7 +283,7 @@ func storageCoreInit(driver string) (storage, error) {
 
 func storageInit(s *state.State, poolName string, volumeName string, volumeType int) (storage, error) {
 	// Load the storage pool.
-	poolID, pool, err := s.DB.StoragePoolGet(poolName)
+	poolID, pool, err := s.Node.StoragePoolGet(poolName)
 	if err != nil {
 		return nil, err
 	}
@@ -298,7 +298,7 @@ func storageInit(s *state.State, poolName string, volumeName string, volumeType
 	// Load the storage volume.
 	volume := &api.StorageVolume{}
 	if volumeName != "" && volumeType >= 0 {
-		_, volume, err = s.DB.StoragePoolVolumeGetType(volumeName, volumeType, poolID)
+		_, volume, err = s.Node.StoragePoolVolumeGetType(volumeName, volumeType, poolID)
 		if err != nil {
 			return nil, err
 		}
@@ -316,7 +316,7 @@ func storageInit(s *state.State, poolName string, volumeName string, volumeType
 		btrfs.pool = pool
 		btrfs.volume = volume
 		btrfs.s = s
-		btrfs.db = s.DB
+		btrfs.db = s.Node
 		err = btrfs.StoragePoolInit()
 		if err != nil {
 			return nil, err
@@ -328,7 +328,7 @@ func storageInit(s *state.State, poolName string, volumeName string, volumeType
 		dir.pool = pool
 		dir.volume = volume
 		dir.s = s
-		dir.db = s.DB
+		dir.db = s.Node
 		err = dir.StoragePoolInit()
 		if err != nil {
 			return nil, err
@@ -340,7 +340,7 @@ func storageInit(s *state.State, poolName string, volumeName string, volumeType
 		ceph.pool = pool
 		ceph.volume = volume
 		ceph.s = s
-		ceph.db = s.DB
+		ceph.db = s.Node
 		err = ceph.StoragePoolInit()
 		if err != nil {
 			return nil, err
@@ -352,7 +352,7 @@ func storageInit(s *state.State, poolName string, volumeName string, volumeType
 		lvm.pool = pool
 		lvm.volume = volume
 		lvm.s = s
-		lvm.db = s.DB
+		lvm.db = s.Node
 		err = lvm.StoragePoolInit()
 		if err != nil {
 			return nil, err
@@ -364,7 +364,7 @@ func storageInit(s *state.State, poolName string, volumeName string, volumeType
 		mock.pool = pool
 		mock.volume = volume
 		mock.s = s
-		mock.db = s.DB
+		mock.db = s.Node
 		err = mock.StoragePoolInit()
 		if err != nil {
 			return nil, err
@@ -376,7 +376,7 @@ func storageInit(s *state.State, poolName string, volumeName string, volumeType
 		zfs.pool = pool
 		zfs.volume = volume
 		zfs.s = s
-		zfs.db = s.DB
+		zfs.db = s.Node
 		err = zfs.StoragePoolInit()
 		if err != nil {
 			return nil, err
@@ -517,11 +517,11 @@ func storagePoolVolumeAttachInit(s *state.State, poolName string, volumeName str
 
 	st.SetStoragePoolVolumeWritable(&poolVolumePut)
 
-	poolID, err := s.DB.StoragePoolGetID(poolName)
+	poolID, err := s.Node.StoragePoolGetID(poolName)
 	if err != nil {
 		return nil, err
 	}
-	err = s.DB.StoragePoolVolumeUpdate(volumeName, volumeType, poolID, poolVolumePut.Description, poolVolumePut.Config)
+	err = s.Node.StoragePoolVolumeUpdate(volumeName, volumeType, poolID, poolVolumePut.Description, poolVolumePut.Config)
 	if err != nil {
 		return nil, err
 	}
@@ -544,7 +544,7 @@ func storagePoolVolumeContainerCreateInit(s *state.State, poolName string, conta
 
 func storagePoolVolumeContainerLoadInit(s *state.State, containerName string) (storage, error) {
 	// Get the storage pool of a given container.
-	poolName, err := s.DB.ContainerPool(containerName)
+	poolName, err := s.Node.ContainerPool(containerName)
 	if err != nil {
 		return nil, err
 	}
@@ -810,7 +810,7 @@ func StorageProgressWriter(op *operation, key string, description string) func(i
 }
 
 func SetupStorageDriver(s *state.State, forceCheck bool) error {
-	pools, err := s.DB.StoragePools()
+	pools, err := s.Node.StoragePools()
 	if err != nil {
 		if err == db.NoSuchObjectError {
 			logger.Debugf("No existing storage pools detected.")
@@ -827,7 +827,7 @@ func SetupStorageDriver(s *state.State, forceCheck bool) error {
 	// but the upgrade somehow got messed up then there will be no
 	// "storage_api" entry in the db.
 	if len(pools) > 0 && !forceCheck {
-		appliedPatches, err := s.DB.Patches()
+		appliedPatches, err := s.Node.Patches()
 		if err != nil {
 			return err
 		}
@@ -854,7 +854,7 @@ func SetupStorageDriver(s *state.State, forceCheck bool) error {
 	}
 
 	// Update the storage drivers cache in api_1.0.go.
-	storagePoolDriversCacheUpdate(s.DB)
+	storagePoolDriversCacheUpdate(s.Node)
 	return nil
 }
 
diff --git a/lxd/storage_ceph.go b/lxd/storage_ceph.go
index 1af3cc410..0c6c7f0ba 100644
--- a/lxd/storage_ceph.go
+++ b/lxd/storage_ceph.go
@@ -972,7 +972,7 @@ func (s *storageCeph) ContainerCreateFromImage(container container, fingerprint
 			fingerprint, storagePoolVolumeTypeNameImage, s.UserName)
 
 		if ok {
-			_, volume, err := s.s.DB.StoragePoolVolumeGetType(fingerprint, db.StoragePoolVolumeTypeImage, s.poolID)
+			_, volume, err := s.s.Node.StoragePoolVolumeGetType(fingerprint, db.StoragePoolVolumeTypeImage, s.poolID)
 			if err != nil {
 				return err
 			}
diff --git a/lxd/storage_lvm_utils.go b/lxd/storage_lvm_utils.go
index f9f3f1340..261e457b9 100644
--- a/lxd/storage_lvm_utils.go
+++ b/lxd/storage_lvm_utils.go
@@ -497,7 +497,7 @@ func (s *storageLvm) containerCreateFromImageThinLv(c container, fp string) erro
 		var imgerr error
 		ok, _ := storageLVExists(imageLvmDevPath)
 		if ok {
-			_, volume, err := s.s.DB.StoragePoolVolumeGetType(fp, db.StoragePoolVolumeTypeImage, s.poolID)
+			_, volume, err := s.s.Node.StoragePoolVolumeGetType(fp, db.StoragePoolVolumeTypeImage, s.poolID)
 			if err != nil {
 				return err
 			}
@@ -684,7 +684,7 @@ func storageLVMThinpoolExists(vgName string, poolName string) (bool, error) {
 func storageLVMGetThinPoolUsers(s *state.State) ([]string, error) {
 	results := []string{}
 
-	cNames, err := s.DB.ContainersList(db.CTypeRegular)
+	cNames, err := s.Node.ContainersList(db.CTypeRegular)
 	if err != nil {
 		return results, err
 	}
@@ -702,7 +702,7 @@ func storageLVMGetThinPoolUsers(s *state.State) ([]string, error) {
 		}
 	}
 
-	imageNames, err := s.DB.ImagesGet(false)
+	imageNames, err := s.Node.ImagesGet(false)
 	if err != nil {
 		return results, err
 	}
diff --git a/lxd/storage_pools_utils.go b/lxd/storage_pools_utils.go
index 6df16c870..99dd4f690 100644
--- a/lxd/storage_pools_utils.go
+++ b/lxd/storage_pools_utils.go
@@ -62,7 +62,7 @@ func storagePoolUpdate(state *state.State, name, newDescription string, newConfi
 
 	// Update the database if something changed
 	if len(changedConfig) != 0 || newDescription != oldDescription {
-		err = state.DB.StoragePoolUpdate(name, newDescription, newConfig)
+		err = state.Node.StoragePoolUpdate(name, newDescription, newConfig)
 		if err != nil {
 			return err
 		}
@@ -164,7 +164,7 @@ func storagePoolDBCreate(s *state.State, poolName, poolDescription string, drive
 	}
 
 	// Check that the storage pool does not already exist.
-	_, err = s.DB.StoragePoolGetID(poolName)
+	_, err = s.Node.StoragePoolGetID(poolName)
 	if err == nil {
 		return fmt.Errorf("The storage pool already exists")
 	}
@@ -187,7 +187,7 @@ func storagePoolDBCreate(s *state.State, poolName, poolDescription string, drive
 	}
 
 	// Create the database entry for the storage pool.
-	_, err = dbStoragePoolCreateAndUpdateCache(s.DB, poolName, poolDescription, driver, config)
+	_, err = dbStoragePoolCreateAndUpdateCache(s.Node, poolName, poolDescription, driver, config)
 	if err != nil {
 		return fmt.Errorf("Error inserting %s into database: %s", poolName, err)
 	}
@@ -209,7 +209,7 @@ func storagePoolCreateInternal(state *state.State, poolName, poolDescription str
 		if !tryUndo {
 			return
 		}
-		dbStoragePoolDeleteAndUpdateCache(state.DB, poolName)
+		dbStoragePoolDeleteAndUpdateCache(state.Node, poolName)
 	}()
 
 	s, err := storagePoolInit(state, poolName)
@@ -238,7 +238,7 @@ func storagePoolCreateInternal(state *state.State, poolName, poolDescription str
 	configDiff, _ := storageConfigDiff(config, postCreateConfig)
 	if len(configDiff) > 0 {
 		// Create the database entry for the storage pool.
-		err = state.DB.StoragePoolUpdate(poolName, poolDescription, postCreateConfig)
+		err = state.Node.StoragePoolUpdate(poolName, poolDescription, postCreateConfig)
 		if err != nil {
 			return fmt.Errorf("Error inserting %s into database: %s", poolName, err)
 		}
diff --git a/lxd/storage_volumes_utils.go b/lxd/storage_volumes_utils.go
index 7e690e60b..c79b1e461 100644
--- a/lxd/storage_volumes_utils.go
+++ b/lxd/storage_volumes_utils.go
@@ -151,14 +151,14 @@ func storagePoolVolumeUpdate(state *state.State, poolName string, volumeName str
 		s.SetStoragePoolVolumeWritable(&newWritable)
 	}
 
-	poolID, err := state.DB.StoragePoolGetID(poolName)
+	poolID, err := state.Node.StoragePoolGetID(poolName)
 	if err != nil {
 		return err
 	}
 
 	// Update the database if something changed
 	if len(changedConfig) != 0 || newDescription != oldDescription {
-		err = state.DB.StoragePoolVolumeUpdate(volumeName, volumeType, poolID, newDescription, newConfig)
+		err = state.Node.StoragePoolVolumeUpdate(volumeName, volumeType, poolID, newDescription, newConfig)
 		if err != nil {
 			return err
 		}
@@ -172,7 +172,7 @@ func storagePoolVolumeUpdate(state *state.State, poolName string, volumeName str
 
 func storagePoolVolumeUsedByContainersGet(s *state.State, volumeName string,
 	volumeTypeName string) ([]string, error) {
-	cts, err := s.DB.ContainersList(db.CTypeRegular)
+	cts, err := s.Node.ContainersList(db.CTypeRegular)
 	if err != nil {
 		return []string{}, err
 	}
@@ -233,7 +233,7 @@ func storagePoolVolumeUsedByGet(s *state.State, volumeName string, volumeTypeNam
 			fmt.Sprintf("/%s/containers/%s", version.APIVersion, ct))
 	}
 
-	profiles, err := profilesUsingPoolVolumeGetNames(s.DB, volumeName, volumeTypeName)
+	profiles, err := profilesUsingPoolVolumeGetNames(s.Node, volumeName, volumeTypeName)
 	if err != nil {
 		return []string{}, err
 	}
@@ -302,14 +302,14 @@ func storagePoolVolumeDBCreate(s *state.State, poolName string, volumeName, volu
 	}
 
 	// Load storage pool the volume will be attached to.
-	poolID, poolStruct, err := s.DB.StoragePoolGet(poolName)
+	poolID, poolStruct, err := s.Node.StoragePoolGet(poolName)
 	if err != nil {
 		return err
 	}
 
 	// Check that a storage volume of the same storage volume type does not
 	// already exist.
-	volumeID, _ := s.DB.StoragePoolVolumeGetTypeID(volumeName, volumeType, poolID)
+	volumeID, _ := s.Node.StoragePoolVolumeGetTypeID(volumeName, volumeType, poolID)
 	if volumeID > 0 {
 		return fmt.Errorf("a storage volume of type %s does already exist", volumeTypeName)
 	}
@@ -331,7 +331,7 @@ func storagePoolVolumeDBCreate(s *state.State, poolName string, volumeName, volu
 	}
 
 	// Create the database entry for the storage volume.
-	_, err = s.DB.StoragePoolVolumeCreate(volumeName, volumeDescription, volumeType, poolID, volumeConfig)
+	_, err = s.Node.StoragePoolVolumeCreate(volumeName, volumeDescription, volumeType, poolID, volumeConfig)
 	if err != nil {
 		return fmt.Errorf("Error inserting %s of type %s into database: %s", poolName, volumeTypeName, err)
 	}
@@ -361,7 +361,7 @@ func storagePoolVolumeCreateInternal(state *state.State, poolName string, volume
 	// Create storage volume.
 	err = s.StoragePoolVolumeCreate()
 	if err != nil {
-		state.DB.StoragePoolVolumeDelete(volumeName, volumeType, poolID)
+		state.Node.StoragePoolVolumeDelete(volumeName, volumeType, poolID)
 		return err
 	}
 

From 884023f2003d2efba3abbc6fc49dcc09c5dd04d0 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 12 Oct 2017 16:52:25 +0000
Subject: [PATCH 012/227] Add testing facilities for state.State and sys.OS

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/state/testing.go | 29 +++++++++++++++++++++++++++++
 lxd/sys/testing.go   | 28 ++++++++++++++++++++++++++++
 2 files changed, 57 insertions(+)
 create mode 100644 lxd/state/testing.go
 create mode 100644 lxd/sys/testing.go

diff --git a/lxd/state/testing.go b/lxd/state/testing.go
new file mode 100644
index 000000000..27d3ac86a
--- /dev/null
+++ b/lxd/state/testing.go
@@ -0,0 +1,29 @@
+package state
+
+import (
+	"testing"
+
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/sys"
+)
+
+// NewTestState returns a State object initialized with testable instances of
+// the node/cluster databases and of the OS facade.
+//
+// Return the newly created State object, along with a function that can be
+// used for cleaning it up.
+func NewTestState(t *testing.T) (*State, func()) {
+	node, nodeCleanup := db.NewTestNode(t)
+	cluster, clusterCleanup := db.NewTestCluster(t)
+	os, osCleanup := sys.NewTestOS(t)
+
+	cleanup := func() {
+		nodeCleanup()
+		clusterCleanup()
+		osCleanup()
+	}
+
+	state := NewState(node, cluster, nil, os)
+
+	return state, cleanup
+}
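
For reference, a consumer of this helper would look something like the
following sketch (TestSomething is a made-up name):

	func TestSomething(t *testing.T) {
		s, cleanup := state.NewTestState(t)
		defer cleanup()

		// s.Node, s.Cluster and s.OS are backed by throwaway test
		// instances and are safe to mutate.
		_ = s
	}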
diff --git a/lxd/sys/testing.go b/lxd/sys/testing.go
new file mode 100644
index 000000000..b0bb8a42a
--- /dev/null
+++ b/lxd/sys/testing.go
@@ -0,0 +1,28 @@
+package sys
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+// NewTestOS returns a new OS instance initialized with test values.
+func NewTestOS(t *testing.T) (*OS, func()) {
+	dir, err := ioutil.TempDir("", "lxd-sys-os-test-")
+	require.NoError(t, err)
+
+	cleanup := func() {
+		require.NoError(t, os.RemoveAll(dir))
+	}
+
+	os := &OS{
+		VarDir:   dir,
+		CacheDir: filepath.Join(dir, "cache"),
+		LogDir:   filepath.Join(dir, "log"),
+	}
+
+	return os, cleanup
+}

From 67a5278a29530a2b63ae1f4c1bbfc4bfbc69c11a Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 15 Sep 2017 07:24:43 +0000
Subject: [PATCH 013/227] Add db APIs to read and update the raft_nodes table

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/query/slices.go |  12 ++---
 lxd/db/raft.go         | 114 ++++++++++++++++++++++++++++++++++++++++++
 lxd/db/raft_test.go    | 133 +++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 253 insertions(+), 6 deletions(-)
 create mode 100644 lxd/db/raft.go
 create mode 100644 lxd/db/raft_test.go

diff --git a/lxd/db/query/slices.go b/lxd/db/query/slices.go
index 59d0cc892..6cd9a7934 100644
--- a/lxd/db/query/slices.go
+++ b/lxd/db/query/slices.go
@@ -8,7 +8,7 @@ import (
 
 // SelectStrings executes a statement which must yield rows with a single string
 // column. It returns the list of column values.
-func SelectStrings(tx *sql.Tx, query string) ([]string, error) {
+func SelectStrings(tx *sql.Tx, query string, args ...interface{}) ([]string, error) {
 	values := []string{}
 	scan := func(rows *sql.Rows) error {
 		var value string
@@ -20,7 +20,7 @@ func SelectStrings(tx *sql.Tx, query string) ([]string, error) {
 		return nil
 	}
 
-	err := scanSingleColumn(tx, query, "TEXT", scan)
+	err := scanSingleColumn(tx, query, args, "TEXT", scan)
 	if err != nil {
 		return nil, err
 	}
@@ -30,7 +30,7 @@ func SelectStrings(tx *sql.Tx, query string) ([]string, error) {
 
 // SelectIntegers executes a statement which must yield rows with a single integer
 // column. It returns the list of column values.
-func SelectIntegers(tx *sql.Tx, query string) ([]int, error) {
+func SelectIntegers(tx *sql.Tx, query string, args ...interface{}) ([]int, error) {
 	values := []int{}
 	scan := func(rows *sql.Rows) error {
 		var value int
@@ -42,7 +42,7 @@ func SelectIntegers(tx *sql.Tx, query string) ([]int, error) {
 		return nil
 	}
 
-	err := scanSingleColumn(tx, query, "INTEGER", scan)
+	err := scanSingleColumn(tx, query, args, "INTEGER", scan)
 	if err != nil {
 		return nil, err
 	}
@@ -76,8 +76,8 @@ func InsertStrings(tx *sql.Tx, stmt string, values []string) error {
 // Execute the given query and ensure that it yields rows with a single column
 // of the given database type. For every row yielded, execute the given
 // scanner.
-func scanSingleColumn(tx *sql.Tx, query string, typeName string, scan scanFunc) error {
-	rows, err := tx.Query(query)
+func scanSingleColumn(tx *sql.Tx, query string, args []interface{}, typeName string, scan scanFunc) error {
+	rows, err := tx.Query(query, args...)
 	if err != nil {
 		return err
 	}
diff --git a/lxd/db/raft.go b/lxd/db/raft.go
new file mode 100644
index 000000000..40d6b29cb
--- /dev/null
+++ b/lxd/db/raft.go
@@ -0,0 +1,114 @@
+package db
+
+import (
+	"fmt"
+
+	"github.com/lxc/lxd/lxd/db/query"
+	"github.com/pkg/errors"
+)
+
+// RaftNode holds information about a single node in the dqlite raft cluster.
+type RaftNode struct {
+	ID      int64  // Stable node identifier
+	Address string // Network address of the node
+}
+
+// RaftNodes returns information about all LXD nodes that are members of the
+// dqlite Raft cluster (possibly including the local node). If this LXD
+// instance is not running in clustered mode, an empty list is returned.
+func (n *NodeTx) RaftNodes() ([]RaftNode, error) {
+	nodes := []RaftNode{}
+	dest := func(i int) []interface{} {
+		nodes = append(nodes, RaftNode{})
+		return []interface{}{&nodes[i].ID, &nodes[i].Address}
+	}
+	err := query.SelectObjects(n.tx, dest, "SELECT id, address FROM raft_nodes ORDER BY id")
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to fetch raft nodes")
+	}
+	return nodes, nil
+}
+
+// RaftNodeAddresses returns the addresses of all LXD nodes that are members of
+// the dqlite Raft cluster (possibly including the local node). If this LXD
+// instance is not running in clustered mode, an empty list is returned.
+func (n *NodeTx) RaftNodeAddresses() ([]string, error) {
+	return query.SelectStrings(n.tx, "SELECT address FROM raft_nodes")
+}
+
+// RaftNodeAddress returns the address of the LXD raft node with the given ID,
+// if any matching row exists.
+func (n *NodeTx) RaftNodeAddress(id int64) (string, error) {
+	stmt := "SELECT address FROM raft_nodes WHERE id=?"
+	addresses, err := query.SelectStrings(n.tx, stmt, id)
+	if err != nil {
+		return "", err
+	}
+	switch len(addresses) {
+	case 0:
+		return "", NoSuchObjectError
+	case 1:
+		return addresses[0], nil
+	default:
+		// This should never happen since we have a UNIQUE constraint
+		// on the raft_nodes.id column.
+		return "", fmt.Errorf("more than one match found")
+	}
+}
+
+// RaftNodeFirst adds the first node of the cluster. It ensures that the
+// database ID is 1, to match the server ID of the first raft log entry.
+//
+// This method is supposed to be called when there are no rows in raft_nodes,
+// and it will replace whatever existing row has ID 1.
+func (n *NodeTx) RaftNodeFirst(address string) error {
+	columns := []string{"id", "address"}
+	values := []interface{}{int64(1), address}
+	id, err := query.UpsertObject(n.tx, "raft_nodes", columns, values)
+	if err != nil {
+		return err
+	}
+	if id != 1 {
+		return fmt.Errorf("could not set raft node ID to 1")
+	}
+	return nil
+}
+
+// RaftNodeAdd adds a node to the current list of LXD nodes that are part of the
+// dqlite Raft cluster. It returns the ID of the newly inserted row.
+func (n *NodeTx) RaftNodeAdd(address string) (int64, error) {
+	columns := []string{"address"}
+	values := []interface{}{address}
+	return query.UpsertObject(n.tx, "raft_nodes", columns, values)
+}
+
+// RaftNodeDelete removes a node from the current list of LXD nodes that are
+// part of the dqlite Raft cluster.
+func (n *NodeTx) RaftNodeDelete(id int64) error {
+	deleted, err := query.DeleteObject(n.tx, "raft_nodes", id)
+	if err != nil {
+		return err
+	}
+	if !deleted {
+		return NoSuchObjectError
+	}
+	return nil
+}
+
+// RaftNodesReplace replaces the current list of raft nodes.
+func (n *NodeTx) RaftNodesReplace(nodes []RaftNode) error {
+	_, err := n.tx.Exec("DELETE FROM raft_nodes")
+	if err != nil {
+		return err
+	}
+
+	columns := []string{"id", "address"}
+	for _, node := range nodes {
+		values := []interface{}{node.ID, node.Address}
+		_, err := query.UpsertObject(n.tx, "raft_nodes", columns, values)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/lxd/db/raft_test.go b/lxd/db/raft_test.go
new file mode 100644
index 000000000..dd74b8237
--- /dev/null
+++ b/lxd/db/raft_test.go
@@ -0,0 +1,133 @@
+package db_test
+
+import (
+	"testing"
+
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// Fetch all raft nodes.
+func TestRaftNodes(t *testing.T) {
+	tx, cleanup := db.NewTestNodeTx(t)
+	defer cleanup()
+
+	id1, err := tx.RaftNodeAdd("1.2.3.4:666")
+	require.NoError(t, err)
+
+	id2, err := tx.RaftNodeAdd("5.6.7.8:666")
+	require.NoError(t, err)
+
+	nodes, err := tx.RaftNodes()
+	require.NoError(t, err)
+
+	assert.Equal(t, id1, nodes[0].ID)
+	assert.Equal(t, id2, nodes[1].ID)
+	assert.Equal(t, "1.2.3.4:666", nodes[0].Address)
+	assert.Equal(t, "5.6.7.8:666", nodes[1].Address)
+}
+
+// Fetch the addresses of all raft nodes.
+func TestRaftNodeAddresses(t *testing.T) {
+	tx, cleanup := db.NewTestNodeTx(t)
+	defer cleanup()
+
+	_, err := tx.RaftNodeAdd("1.2.3.4:666")
+	require.NoError(t, err)
+
+	_, err = tx.RaftNodeAdd("5.6.7.8:666")
+	require.NoError(t, err)
+
+	addresses, err := tx.RaftNodeAddresses()
+	require.NoError(t, err)
+
+	assert.Equal(t, []string{"1.2.3.4:666", "5.6.7.8:666"}, addresses)
+}
+
+// Fetch the address of the raft node with the given ID.
+func TestRaftNodeAddress(t *testing.T) {
+	tx, cleanup := db.NewTestNodeTx(t)
+	defer cleanup()
+
+	_, err := tx.RaftNodeAdd("1.2.3.4:666")
+	require.NoError(t, err)
+
+	id, err := tx.RaftNodeAdd("5.6.7.8:666")
+	require.NoError(t, err)
+
+	address, err := tx.RaftNodeAddress(id)
+	require.NoError(t, err)
+	assert.Equal(t, "5.6.7.8:666", address)
+}
+
+// Add the first raft node.
+func TestRaftNodeFirst(t *testing.T) {
+	tx, cleanup := db.NewTestNodeTx(t)
+	defer cleanup()
+
+	err := tx.RaftNodeFirst("1.2.3.4:666")
+	assert.NoError(t, err)
+
+	err = tx.RaftNodeDelete(1)
+	assert.NoError(t, err)
+
+	err = tx.RaftNodeFirst("5.6.7.8:666")
+	assert.NoError(t, err)
+
+	address, err := tx.RaftNodeAddress(1)
+	require.NoError(t, err)
+	assert.Equal(t, "5.6.7.8:666", address)
+}
+
+// Add a new raft node.
+func TestRaftNodeAdd(t *testing.T) {
+	tx, cleanup := db.NewTestNodeTx(t)
+	defer cleanup()
+
+	id, err := tx.RaftNodeAdd("1.2.3.4:666")
+	assert.Equal(t, int64(1), id)
+	assert.NoError(t, err)
+}
+
+// Delete an existing raft node.
+func TestRaftNodeDelete(t *testing.T) {
+	tx, cleanup := db.NewTestNodeTx(t)
+	defer cleanup()
+
+	id, err := tx.RaftNodeAdd("1.2.3.4:666")
+	require.NoError(t, err)
+
+	err = tx.RaftNodeDelete(id)
+	assert.NoError(t, err)
+}
+
+// Deleting a non-existing raft node returns an error.
+func TestRaftNodeDelete_NonExisting(t *testing.T) {
+	tx, cleanup := db.NewTestNodeTx(t)
+	defer cleanup()
+
+	err := tx.RaftNodeDelete(1)
+	assert.Equal(t, db.NoSuchObjectError, err)
+}
+
+// Replace all existing raft nodes.
+func TestRaftNodesReplace(t *testing.T) {
+	tx, cleanup := db.NewTestNodeTx(t)
+	defer cleanup()
+
+	_, err := tx.RaftNodeAdd("1.2.3.4:666")
+	require.NoError(t, err)
+
+	nodes := []db.RaftNode{
+		{ID: 2, Address: "2.2.2.2:666"},
+		{ID: 3, Address: "3.3.3.3:666"},
+	}
+	err = tx.RaftNodesReplace(nodes)
+	assert.NoError(t, err)
+
+	newNodes, err := tx.RaftNodes()
+	require.NoError(t, err)
+
+	assert.Equal(t, nodes, newNodes)
+}

From 747b025bc28019a2ca24b4f459a1b60912eb2035 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 11 Oct 2017 13:34:20 +0000
Subject: [PATCH 014/227] Add node.DetermineRaftNode function to figure out what
 role a node plays

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/node/raft.go      | 60 +++++++++++++++++++++++++++++++++++++++
 lxd/node/raft_test.go | 77 +++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 137 insertions(+)
 create mode 100644 lxd/node/raft.go
 create mode 100644 lxd/node/raft_test.go

diff --git a/lxd/node/raft.go b/lxd/node/raft.go
new file mode 100644
index 000000000..8b4605356
--- /dev/null
+++ b/lxd/node/raft.go
@@ -0,0 +1,60 @@
+package node
+
+import "github.com/lxc/lxd/lxd/db"
+
+// DetermineRaftNode figures out what raft node ID and address we have, if any.
+//
+// This decision is based on the values of the core.https_address config key
+// and on the rows in the raft_nodes table, both stored in the node-level
+// SQLite database.
+//
+// The following rules are applied:
+//
+// - If no core.https_address config key is set, this is a non-clustered node
+//   and the returned RaftNode will have ID 1 but no address, to signal that
+//   the node should set up an in-memory raft cluster where the node itself
+//   is the only member and leader.
+//
+// - If core.https_address config key is set, but there is no row in the
+//   raft_nodes table, this is a non-clustered node as well, and the same
+//   behavior as in the previous case applies.
+//
+// - If core.https_address config key is set and there is at least one row in
+//   the raft_nodes table, then this node is considered a raft node if
+//   core.https_address matches one of the rows in raft_nodes. In that case,
+//   the matching db.RaftNode row is returned, otherwise nil.
+func DetermineRaftNode(tx *db.NodeTx) (*db.RaftNode, error) {
+	config, err := ConfigLoad(tx)
+	if err != nil {
+		return nil, err
+	}
+
+	address := config.HTTPSAddress()
+
+	// If core.https_address is the empty string, then this LXD instance is
+	// not running in clustering mode.
+	if address == "" {
+		return &db.RaftNode{ID: 1}, nil
+	}
+
+	nodes, err := tx.RaftNodes()
+	if err != nil {
+		return nil, err
+	}
+
+	// If core.https_address is set, but raft_nodes has no rows, this is
+	// still an instance not running in clustering mode.
+	if len(nodes) == 0 {
+		return &db.RaftNode{ID: 1}, nil
+	}
+
+	// If there is one or more row in raft_nodes, try to find a matching
+	// one.
+	for _, node := range nodes {
+		if node.Address == address {
+			return &node, nil
+		}
+	}
+
+	return nil, nil
+}
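
To illustrate the rules above, here is a hedged sketch of how a caller might
consume DetermineRaftNode inside a node-level transaction (assuming the usual
Transaction helper on the node database):

	err := database.Transaction(func(tx *db.NodeTx) error {
		info, err := node.DetermineRaftNode(tx)
		if err != nil {
			return err
		}
		switch {
		case info == nil:
			// Clustered, but not a database node: SQL queries must
			// be sent to a raft node over the network.
		case info.Address == "":
			// Not clustered: single-member in-memory raft, ID 1.
		default:
			// This node is a member of the raft cluster.
		}
		return nil
	})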
diff --git a/lxd/node/raft_test.go b/lxd/node/raft_test.go
new file mode 100644
index 000000000..b376bdc3f
--- /dev/null
+++ b/lxd/node/raft_test.go
@@ -0,0 +1,77 @@
+package node_test
+
+import (
+	"testing"
+
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/node"
+	"github.com/mpvl/subtest"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// The raft identity (ID and address) of a node depends on the value of
+// core.https_address and the entries of the raft_nodes table.
+func TestDetermineRaftNode(t *testing.T) {
+	cases := []struct {
+		title     string
+		address   string       // Value of core.https_address
+		addresses []string     // Entries in raft_nodes
+		node      *db.RaftNode // Expected node value
+	}{
+		{
+			`no core.https_address set`,
+			"",
+			[]string{},
+			&db.RaftNode{ID: 1},
+		},
+		{
+			`core.https_address set and no raft_nodes rows`,
+			"1.2.3.4:8443",
+			[]string{},
+			&db.RaftNode{ID: 1},
+		},
+		{
+			`core.https_address set and matching the one and only raft_nodes row`,
+			"1.2.3.4:8443",
+			[]string{"1.2.3.4:8443"},
+			&db.RaftNode{ID: 1, Address: "1.2.3.4:8443"},
+		},
+		{
+			`core.https_address set and matching one of many raft_nodes rows`,
+			"5.6.7.8:999",
+			[]string{"1.2.3.4:666", "5.6.7.8:999"},
+			&db.RaftNode{ID: 2, Address: "5.6.7.8:999"},
+		},
+		{
+			`core.https_address set and no matching raft_nodes row`,
+			"1.2.3.4:666",
+			[]string{"5.6.7.8:999"},
+			nil,
+		},
+	}
+
+	for _, c := range cases {
+		subtest.Run(t, c.title, func(t *testing.T) {
+			tx, cleanup := db.NewTestNodeTx(t)
+			defer cleanup()
+
+			err := tx.UpdateConfig(map[string]string{"core.https_address": c.address})
+			require.NoError(t, err)
+
+			for _, address := range c.addresses {
+				_, err := tx.RaftNodeAdd(address)
+				require.NoError(t, err)
+			}
+
+			node, err := node.DetermineRaftNode(tx)
+			require.NoError(t, err)
+			if c.node == nil {
+				assert.Nil(t, node)
+			} else {
+				assert.Equal(t, c.node.ID, node.ID)
+				assert.Equal(t, c.node.Address, node.Address)
+			}
+		})
+	}
+}

From cedc584e25f0b82320090c8421133775949efa80 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Sat, 19 Aug 2017 20:58:27 +0000
Subject: [PATCH 015/227] Add sqlite submodule

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 .gitmodules                    |  4 ++++
 Makefile                       | 13 +++++++++++--
 lxd/.dir-locals.el             | 17 ++++++++++++++++-
 lxd/.go-rename-wrapper         |  7 +++++++
 lxd/.go-wrapper                |  7 +++++++
 lxd/sqlite                     |  1 +
 test/suites/static_analysis.sh |  9 ++++++++-
 7 files changed, 54 insertions(+), 4 deletions(-)
 create mode 100644 .gitmodules
 create mode 100755 lxd/.go-rename-wrapper
 create mode 100755 lxd/.go-wrapper
 create mode 160000 lxd/sqlite

diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 000000000..06ca26ad9
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,4 @@
+[submodule "lxd/sqlite"]
+	path = lxd/sqlite
+	url = https://github.com/CanonicalLtd/sqlite.git
+	ignore = dirty
\ No newline at end of file
diff --git a/Makefile b/Makefile
index 891088074..3ad71816f 100644
--- a/Makefile
+++ b/Makefile
@@ -3,6 +3,7 @@ POFILES=$(wildcard po/*.po)
 MOFILES=$(patsubst %.po,%.mo,$(POFILES))
 LINGUAS=$(basename $(POFILES))
 POTFILE=po/$(DOMAIN).pot
+GO_SERVER=./lxd/.go-wrapper
 
 # dist is primarily for use when packaging; for development we still manage
 # dependencies via `go get` explicitly.
@@ -13,8 +14,8 @@ TAGS=$(shell printf "\#include <sqlite3.h>\nvoid main(){}" | $(CC) -o /dev/null
 
 .PHONY: default
 default:
-	go get -t -v -d ./...
-	go install -v $(TAGS) $(DEBUG) ./...
+	$(GO_SERVER) get -t -v -d ./...
+	$(GO_SERVER) install -v $(TAGS) $(DEBUG) ./...
 	@echo "LXD built successfully"
 
 .PHONY: client
@@ -105,6 +106,14 @@ update-pot:
 
 build-mo: $(MOFILES)
 
+.PHONY: build-sqlite
+build-sqlite:
+	cd lxd/sqlite && \
+	    git log -1 --format=format:%ci%n | sed -e 's/ [-+].*//;s/ /T/;s/^/D /' > manifest && \
+	    echo $(shell git log -1 --format=format:%H) > manifest.uuid && \
+	    ./configure && \
+	    make
+
 static-analysis:
 	(cd test;  /bin/sh -x -c ". suites/static_analysis.sh; test_static_analysis")
 
diff --git a/lxd/.dir-locals.el b/lxd/.dir-locals.el
index 9bebcc48c..315bd893b 100644
--- a/lxd/.dir-locals.el
+++ b/lxd/.dir-locals.el
@@ -1,3 +1,18 @@
 ;;; Directory Local Variables
 ;;; For more information see (info "(emacs) Directory Variables")
-((go-mode . ((go-test-args . "-tags libsqlite3"))))
+((go-mode
+  . ((go-test-args . "-tags libsqlite3 -timeout 10s")
+     (eval
+      . (set
+	 (make-local-variable 'flycheck-go-build-tags)
+	 '("libsqlite3")))
+     (eval
+      . (let* ((locals-path
+     		(let ((d (dir-locals-find-file ".")))
+     		  (if (stringp d) (file-name-directory d) (car d))))
+	       (go-wrapper (s-concat locals-path ".go-wrapper"))
+	       (go-rename-wrapper (s-concat locals-path ".go-rename-wrapper")))
+     	  (progn
+	    (set (make-local-variable 'go-command) go-wrapper)
+	    (set (make-local-variable 'flycheck-go-build-executable) go-wrapper)
+	    (set (make-local-variable 'go-rename-command) go-rename-wrapper)))))))
diff --git a/lxd/.go-rename-wrapper b/lxd/.go-rename-wrapper
new file mode 100755
index 000000000..1ad3ceefa
--- /dev/null
+++ b/lxd/.go-rename-wrapper
@@ -0,0 +1,7 @@
+#!/bin/sh
+
+export CGO_CFLAGS="-I$(realpath $(dirname $0))/sqlite/"
+export CGO_LDFLAGS="-L$(realpath $(dirname $0))/sqlite/.libs"
+export LD_LIBRARY_PATH="$(realpath $(dirname $0))/sqlite/.libs"
+
+gorename "$@"
diff --git a/lxd/.go-wrapper b/lxd/.go-wrapper
new file mode 100755
index 000000000..9fd28b735
--- /dev/null
+++ b/lxd/.go-wrapper
@@ -0,0 +1,7 @@
+#!/bin/sh
+
+export CGO_CFLAGS="-I$(realpath $(dirname $0))/sqlite/"
+export CGO_LDFLAGS="-L$(realpath $(dirname $0))/sqlite/.libs"
+export LD_LIBRARY_PATH="$(realpath $(dirname $0))/sqlite/.libs"
+
+go "$@"
diff --git a/lxd/sqlite b/lxd/sqlite
new file mode 160000
index 000000000..235392610
--- /dev/null
+++ b/lxd/sqlite
@@ -0,0 +1 @@
+Subproject commit 235392610287d85dda11a6eee4d6e34d7cc6ef3f
diff --git a/test/suites/static_analysis.sh b/test/suites/static_analysis.sh
index b0d8672ce..d834d1495 100644
--- a/test/suites/static_analysis.sh
+++ b/test/suites/static_analysis.sh
@@ -22,8 +22,15 @@ test_static_analysis() {
     fi
 
     # Go static analysis
+    CGO_CFLAGS="-I$(pwd)/lxd/sqlite/"
+    CGO_LDFLAGS="-L$(pwd)/lxd/sqlite/.libs"
+    LD_LIBRARY_PATH="$(pwd)/lxd/sqlite/.libs"
+    export CGO_CFLAGS
+    export CGO_LDFLAGS
+    export LD_LIBRARY_PATH
+
     ## Functions starting by empty line
-    OUT=$(grep -r "^$" -B1 . | grep "func " | grep -v "}$" || true)
+    OUT=$(grep -r "^$" -B1 . | grep "func " | grep -v "}$" | grep -v "./lxd/sqlite/" || true)
     if [ -n "${OUT}" ]; then
       echo "ERROR: Functions must not start with an empty line: ${OUT}"
       false

From cad685d5734c1855ffd8e505c0ec12688f108701 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 12 Oct 2017 17:47:30 +0000
Subject: [PATCH 016/227] Add cluster.newRaft APIs to bring up a LXD-specific
 raft instance

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_internal.go             |   2 +-
 lxd/cluster/gateway.go          |   1 +
 lxd/cluster/raft.go             | 420 ++++++++++++++++++++++++++++++++++++++++
 lxd/cluster/raft_export_test.go |  19 ++
 lxd/cluster/raft_test.go        | 143 ++++++++++++++
 lxd/cluster/tls.go              |  35 ++++
 lxd/daemon.go                   |  75 ++++---
 lxd/endpoints/network.go        |  17 +-
 lxd/main_test.go                |   9 +-
 lxd/sys/fs.go                   |   1 +
 lxd/util/net.go                 |  24 +++
 11 files changed, 696 insertions(+), 50 deletions(-)
 create mode 100644 lxd/cluster/raft.go
 create mode 100644 lxd/cluster/raft_export_test.go
 create mode 100644 lxd/cluster/raft_test.go
 create mode 100644 lxd/cluster/tls.go

diff --git a/lxd/api_internal.go b/lxd/api_internal.go
index 4a253db7f..b5a568d8d 100644
--- a/lxd/api_internal.go
+++ b/lxd/api_internal.go
@@ -41,7 +41,7 @@ func internalWaitReady(d *Daemon, r *http.Request) Response {
 }
 
 func internalShutdown(d *Daemon, r *http.Request) Response {
-	d.shutdownChan <- true
+	d.shutdownChan <- struct{}{}
 
 	return EmptySyncResponse
 }
diff --git a/lxd/cluster/gateway.go b/lxd/cluster/gateway.go
index 41aee225b..10a560aca 100644
--- a/lxd/cluster/gateway.go
+++ b/lxd/cluster/gateway.go
@@ -93,6 +93,7 @@ func (g *Gateway) init() error {
 func grpcMemoryDial(dial func() net.Conn) func() (*grpc.ClientConn, error) {
 	options := []grpc.DialOption{
 		grpc.WithInsecure(),
+		grpc.WithBlock(),
 		grpc.WithDialer(func(string, time.Duration) (net.Conn, error) {
 			return dial(), nil
 		}),
diff --git a/lxd/cluster/raft.go b/lxd/cluster/raft.go
new file mode 100644
index 000000000..0b24ff8b9
--- /dev/null
+++ b/lxd/cluster/raft.go
@@ -0,0 +1,420 @@
+package cluster
+
+import (
+	"bytes"
+	"crypto/x509"
+	"fmt"
+	"log"
+	"math"
+	"net"
+	"net/http"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/CanonicalLtd/dqlite"
+	"github.com/CanonicalLtd/raft-http"
+	"github.com/CanonicalLtd/raft-membership"
+	"github.com/boltdb/bolt"
+	"github.com/hashicorp/raft"
+	"github.com/hashicorp/raft-boltdb"
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/node"
+	"github.com/lxc/lxd/lxd/util"
+	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/logger"
+	"github.com/pkg/errors"
+	log15 "gopkg.in/inconshreveable/log15.v2"
+)
+
+// Create a raft instance and all its dependencies, to be used as backend for
+// the dqlite driver running on this LXD node.
+//
+// If this node should not serve as a dqlite node, nil is returned.
+//
+// The raft instance will use an in-memory transport if clustering is not
+// enabled on this node.
+//
+// The certInfo parameter should contain the cluster TLS keypair and optional
+// CA certificate.
+//
+// The latency parameter is a coarse-grained measure of how fast/reliable network
+// links are. It is used to tweak the various timeout parameters of the raft
+// algorithm. See the raft.Config structure for more details. A value of 1.0
+// means use the default values from hashicorp's raft package. Values closer to
+// 0 reduce the values of the various timeouts (useful when running unit tests
+// in-memory).
+func newRaft(database *db.Node, cert *shared.CertInfo, latency float64) (*raftInstance, error) {
+	if latency <= 0 {
+		return nil, fmt.Errorf("latency should be positive")
+	}
+
+	// Figure out if we actually need to act as dqlite node.
+	var info *db.RaftNode
+	err := database.Transaction(func(tx *db.NodeTx) error {
+		var err error
+		info, err = node.DetermineRaftNode(tx)
+		return err
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// If we're not part of the dqlite cluster, there's nothing to do.
+	if info == nil {
+		return nil, nil
+	}
+	logger.Info("Start database node", log15.Ctx{"id": info.ID, "address": info.Address})
+
+	// Initialize a raft instance along with all needed dependencies.
+	instance, err := raftInstanceInit(database, info, cert, latency)
+	if err != nil {
+		return nil, err
+	}
+
+	return instance, nil
+}
+
+// A LXD-specific wrapper around raft.Raft, which also holds a reference to its
+// network transport and dqlite FSM.
+type raftInstance struct {
+	layer             *rafthttp.Layer       // HTTP-based raft transport layer
+	handler           http.HandlerFunc      // Handles join/leave/connect requests
+	membershipChanger func(*raft.Raft)      // Forwards membership change requests from the handler to raft
+	logs              *raftboltdb.BoltStore // Raft logs store, needs to be closed upon shutdown
+	fsm               raft.FSM              // The dqlite FSM linked to the raft instance
+	raft              *raft.Raft            // The actual raft instance
+}
+
+// Create a new raftFactory, instantiating all needed raft dependencies.
+func raftInstanceInit(
+	db *db.Node, node *db.RaftNode, cert *shared.CertInfo, latency float64) (*raftInstance, error) {
+	// FIXME: should be a parameter
+	timeout := 5 * time.Second
+
+	logger := raftLogger()
+
+	// Raft config.
+	config := raftConfig(latency)
+	config.Logger = logger
+	config.LocalID = raft.ServerID(strconv.Itoa(int(node.ID)))
+
+	// Raft transport
+	var handler *rafthttp.Handler
+	var membershipChanger func(*raft.Raft)
+	var layer *rafthttp.Layer
+	var transport raft.Transport
+	addr := node.Address
+	if addr == "" {
+		// This should normally be used only for testing as it can
+		// cause split-brain, but since we are not exposing raft to the
+		// network at all it's safe to do so. When this node gets
+		// exposed to the network and assigned an address, we need to
+		// restart raft anyway.
+		config.StartAsLeader = true
+		transport = raftMemoryTransport()
+	} else {
+		dial, err := raftDial(cert)
+		if err != nil {
+			return nil, err
+		}
+
+		transport, handler, layer, err = raftNetworkTransport(db, addr, logger, timeout, dial)
+		if err != nil {
+			return nil, err
+		}
+		membershipChanger = func(raft *raft.Raft) {
+			raftmembership.HandleChangeRequests(raft, handler.Requests())
+		}
+	}
+
+	err := raft.ValidateConfig(config)
+	if err != nil {
+		return nil, errors.Wrap(err, "invalid raft configuration")
+	}
+
+	// Data directory
+	dir := filepath.Join(db.Dir(), "raft")
+	if !shared.PathExists(dir) {
+		err := os.Mkdir(dir, 0750)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// Raft logs store
+	logs, err := raftboltdb.New(raftboltdb.Options{
+		Path:        filepath.Join(dir, "logs.db"),
+		BoltOptions: &bolt.Options{Timeout: timeout},
+	})
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to create bolt store for raft logs")
+	}
+
+	// Raft snapshot store
+	snaps, err := raft.NewFileSnapshotStoreWithLogger(dir, 2, logger)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to create file snapshot store")
+	}
+
+	// If we are the initial node, we use the last index persisted in the
+	// logs store and other checks to determine if we have ever
+	// bootstrapped the cluster, and if not we do so (see raft.HasExistingState).
+	if node.ID == 1 {
+		err := raftMaybeBootstrap(config, logs, snaps, transport)
+		if err != nil {
+			return nil, errors.Wrap(err, "failed to boostrap cluster")
+		}
+	}
+
+	// The dqlite FSM.
+	fsm := dqlite.NewFSM(dir)
+
+	// The actual raft instance.
+	raft, err := raft.NewRaft(config, fsm, logs, logs, snaps, transport)
+	if err != nil {
+		logs.Close()
+		return nil, errors.Wrap(err, "failed to start raft")
+	}
+
+	if membershipChanger != nil {
+		// Process Raft connections over HTTP. This goroutine will
+		// terminate when instance.handler.Close() is called, which
+		// happens indirectly when the raft instance is shutdown in
+		// instance.Shutdown(), and the associated transport is closed.
+		go membershipChanger(raft)
+	}
+
+	instance := &raftInstance{
+		layer:             layer,
+		handler:           raftHandler(cert, handler),
+		membershipChanger: membershipChanger,
+		logs:              logs,
+		fsm:               fsm,
+		raft:              raft,
+	}
+
+	return instance, nil
+}
+
+// FSM returns the dqlite FSM associated with the raft instance.
+func (i *raftInstance) FSM() raft.FSM {
+	return i.fsm
+}
+
+// Raft returns the actual underlying raft instance.
+func (i *raftInstance) Raft() *raft.Raft {
+	return i.raft
+}
+
+// HandlerFunc can be used to handle HTTP requests performed against the LXD
+// API RaftEndpoint ("/internal/raft"), in order to join/leave/form the raft
+// cluster.
+//
+// If it returns nil, it means that this node is not supposed to expose a raft
+// endpoint over the network, because it's running as a non-clustered single
+// node.
+func (i *raftInstance) HandlerFunc() http.HandlerFunc {
+	if i.handler == nil {
+		return nil
+	}
+	return i.handler.ServeHTTP
+}
+
+// MembershipChanger returns the underlying rafthttp.Layer, which can be used
+// to change the membership of this node in the cluster.
+func (i *raftInstance) MembershipChanger() raftmembership.Changer {
+	return i.layer
+}
+
+// Shutdown raft and any raft-related resource we have instantiated.
+func (i *raftInstance) Shutdown() error {
+	logger.Info("Stop database node")
+	err := i.raft.Shutdown().Error()
+	if err != nil {
+		return errors.Wrap(err, "failed to shutdown raft")
+	}
+	err = i.logs.Close()
+	if err != nil {
+		return errors.Wrap(err, "failed to close boltdb logs store")
+	}
+	return nil
+}
+
+// Create an in-memory raft transport.
+func raftMemoryTransport() raft.Transport {
+	_, transport := raft.NewInmemTransport("0")
+	return transport
+}
+
+// Create a rafthttp.Dial function that connects over TLS using the given
+// cluster (and optionally CA) certificate both as client and remote
+// certificate.
+func raftDial(cert *shared.CertInfo) (rafthttp.Dial, error) {
+	config, err := tlsClientConfig(cert)
+	if err != nil {
+		return nil, err
+	}
+	dial := rafthttp.NewDialTLS(config)
+	return dial, nil
+}
+
+// Create a network raft transport that will handle connections using a
+// rafthttp.Handler.
+func raftNetworkTransport(
+	db *db.Node,
+	address string,
+	logger *log.Logger,
+	timeout time.Duration,
+	dial rafthttp.Dial) (raft.Transport, *rafthttp.Handler, *rafthttp.Layer, error) {
+	handler := rafthttp.NewHandler()
+	addr, err := net.ResolveTCPAddr("tcp", address)
+	if err != nil {
+		return nil, nil, nil, errors.Wrap(err, "invalid node address")
+	}
+
+	layer := rafthttp.NewLayer(raftEndpoint, addr, handler, dial)
+	config := &raft.NetworkTransportConfig{
+		Logger:                logger,
+		Stream:                layer,
+		MaxPool:               2,
+		Timeout:               timeout,
+		ServerAddressProvider: &raftAddressProvider{db: db},
+	}
+	transport := raft.NewNetworkTransportWithConfig(config)
+
+	return transport, handler, layer, nil
+}
+
+// The LXD API endpoint path that gets routed to a rafthttp.Handler for
+// joining/leaving the cluster and exchanging raft commands between nodes.
+const raftEndpoint = "/internal/raft"
+
+// An address provider that looks up server addresses in the raft_nodes table.
+type raftAddressProvider struct {
+	db *db.Node
+}
+
+func (p *raftAddressProvider) ServerAddr(id raft.ServerID) (raft.ServerAddress, error) {
+	databaseID, err := strconv.Atoi(string(id))
+	if err != nil {
+		return "", errors.Wrap(err, "non-numeric server ID")
+	}
+	var address string
+	err = p.db.Transaction(func(tx *db.NodeTx) error {
+		var err error
+		address, err = tx.RaftNodeAddress(int64(databaseID))
+		return err
+	})
+	if err != nil {
+		return "", err
+	}
+	return raft.ServerAddress(address), nil
+}
+
+// Create a base raft configuration tweaked for a network with the given latency measure.
+func raftConfig(latency float64) *raft.Config {
+	config := raft.DefaultConfig()
+	scale := func(duration *time.Duration) {
+		*duration = time.Duration((math.Ceil(float64(*duration) * latency)))
+	}
+	durations := []*time.Duration{
+		&config.HeartbeatTimeout,
+		&config.ElectionTimeout,
+		&config.CommitTimeout,
+		&config.LeaderLeaseTimeout,
+	}
+	for _, duration := range durations {
+		scale(duration)
+	}
+	return config
+}
+
+// Helper to bootstrap the raft cluster if needed.
+func raftMaybeBootstrap(
+	conf *raft.Config,
+	logs *raftboltdb.BoltStore,
+	snaps raft.SnapshotStore,
+	trans raft.Transport) error {
+	// First check if we were already bootstrapped.
+	hasExistingState, err := raft.HasExistingState(logs, logs, snaps)
+	if err != nil {
+		return errors.Wrap(err, "failed to check if raft has existing state")
+	}
+	if hasExistingState {
+		return nil
+	}
+	server := raft.Server{
+		ID:      conf.LocalID,
+		Address: trans.LocalAddr(),
+	}
+	configuration := raft.Configuration{
+		Servers: []raft.Server{server},
+	}
+	return raft.BootstrapCluster(conf, logs, logs, snaps, trans, configuration)
+}
+
+func raftHandler(info *shared.CertInfo, handler *rafthttp.Handler) http.HandlerFunc {
+	if handler == nil {
+		return nil
+	}
+	cert, err := x509.ParseCertificate(info.KeyPair().Certificate[0])
+	if err != nil {
+		// Since we have already loaded this certificate, typically
+		// using LoadX509KeyPair, an error should never happen, but
+		// check for good measure.
+		panic(fmt.Sprintf("invalid keypair material: %v", err))
+	}
+	trustedCerts := []x509.Certificate{*cert}
+	return func(w http.ResponseWriter, r *http.Request) {
+		if r.TLS == nil || len(r.TLS.PeerCertificates) == 0 ||
+			!util.CheckTrustState(*r.TLS.PeerCertificates[0], trustedCerts) {
+			http.Error(w, "403 invalid client certificate", http.StatusForbidden)
+			return
+		}
+		handler.ServeHTTP(w, r)
+	}
+}
+
+func raftLogger() *log.Logger {
+	return log.New(&raftLogWriter{}, "", 0)
+}
+
+// Implement io.Writer on top of LXD's logging system.
+type raftLogWriter struct {
+}
+
+func (o *raftLogWriter) Write(line []byte) (n int, err error) {
+	// Parse the log level according to hashicorp's raft pkg conventions.
+	level := ""
+	msg := ""
+	x := bytes.IndexByte(line, '[')
+	if x >= 0 {
+		y := bytes.IndexByte(line[x:], ']')
+		if y >= 0 {
+			level = string(line[x+1 : x+y])
+
+			// Capitalize the string, to match LXD logging conventions
+			first := strings.ToUpper(string(line[x+y+2]))
+			rest := string(line[x+y+3 : len(line)-1])
+			msg = first + rest
+		}
+	}
+
+	if level == "" {
+		// Ignore log entries that don't stick to the convention.
+		return len(line), nil
+	}
+
+	switch level {
+	case "DEBUG":
+		logger.Debug(msg)
+	case "INFO":
+		logger.Info(msg)
+	case "WARN":
+		logger.Warn(msg)
+	default:
+		// Ignore any other log level.
+	}
+	return len(line), nil
+}
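
raftAddressProvider is what lets a raft server keep its stable numeric ID
while its network address changes: the transport asks the provider instead of
trusting addresses recorded in old log entries. As an illustration of the
interface being implemented, here is a minimal, hypothetical provider backed
by a plain map rather than the raft_nodes table:

    package main

    import (
            "fmt"

            "github.com/hashicorp/raft"
    )

    // staticAddressProvider resolves raft server IDs from a fixed in-memory
    // map, mirroring what raftAddressProvider does with the raft_nodes table.
    type staticAddressProvider struct {
            addresses map[raft.ServerID]raft.ServerAddress
    }

    func (p *staticAddressProvider) ServerAddr(id raft.ServerID) (raft.ServerAddress, error) {
            addr, ok := p.addresses[id]
            if !ok {
                    return "", fmt.Errorf("no address for server %q", id)
            }
            return addr, nil
    }

    func main() {
            provider := &staticAddressProvider{
                    addresses: map[raft.ServerID]raft.ServerAddress{"1": "10.0.0.1:8443"},
            }
            // Plugged into raft.NetworkTransportConfig.ServerAddressProvider,
            // this would make the transport dial 10.0.0.1:8443 for server "1".
            addr, _ := provider.ServerAddr("1")
            fmt.Println(addr)
    }
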
diff --git a/lxd/cluster/raft_export_test.go b/lxd/cluster/raft_export_test.go
new file mode 100644
index 000000000..e4b7c6dc6
--- /dev/null
+++ b/lxd/cluster/raft_export_test.go
@@ -0,0 +1,19 @@
+package cluster
+
+import (
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/shared"
+)
+
+// Export raft-related APIs for black box unit testing.
+func NewRaft(db *db.Node, cert *shared.CertInfo, latency float64) (*RaftInstance, error) {
+	instance, err := newRaft(db, cert, latency)
+	if err != nil {
+		return nil, err
+	}
+	return &RaftInstance{*instance}, nil
+}
+
+type RaftInstance struct {
+	raftInstance
+}
diff --git a/lxd/cluster/raft_test.go b/lxd/cluster/raft_test.go
new file mode 100644
index 000000000..9e6cfb983
--- /dev/null
+++ b/lxd/cluster/raft_test.go
@@ -0,0 +1,143 @@
+package cluster_test
+
+import (
+	"net/http"
+	"net/http/httptest"
+	"strconv"
+	"testing"
+	"time"
+
+	"github.com/CanonicalLtd/raft-test"
+	"github.com/hashicorp/raft"
+	"github.com/lxc/lxd/lxd/cluster"
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/util"
+	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/logging"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// By default a node starts in single mode.
+func TestRaftFactory_Single(t *testing.T) {
+	db, cleanup := db.NewTestNode(t)
+	defer cleanup()
+
+	cert := shared.TestingKeyPair()
+
+	instance := newRaft(t, db, cert)
+	defer instance.Shutdown()
+
+	rafttest.WaitLeader(t, instance.Raft(), time.Second)
+	assert.Equal(t, raft.Leader, instance.Raft().State())
+}
+
+// If there's a network address configured but we are the only raft node,
+// the factory starts raft in single mode.
+func TestRaftFactory_SingleWithNetworkAddress(t *testing.T) {
+	db, cleanup := db.NewTestNode(t)
+	defer cleanup()
+
+	cert := shared.TestingKeyPair()
+
+	setRaftRole(t, db, "1.2.3.4:666")
+
+	instance := newRaft(t, db, cert)
+	defer instance.Shutdown()
+
+	rafttest.WaitLeader(t, instance.Raft(), time.Second)
+	assert.Equal(t, raft.Leader, instance.Raft().State())
+}
+
+// When the factory is started the first time on a non-clustered node, it will
+// use the memory transport and the raft node will not have a real network
+// address. The in-memory address gets saved in the first log committed in the
+// store as the address of the server with ID "1". If the LXD instance is then
+// reconfigured to enable clustering, we now use a real network transport and
+// setup a ServerAddressProvider that will override the initial in-memory
+// address of node "1" with its real network address, as configured in the
+// raft_nodes table.
+func TestRaftFactory_TransitionToClusteredMode(t *testing.T) {
+	db, cleanup := db.NewTestNode(t)
+	defer cleanup()
+
+	cert := shared.TestingKeyPair()
+
+	instance := newRaft(t, db, cert)
+	instance.Shutdown()
+
+	setRaftRole(t, db, "1.2.3.4:666")
+
+	instance = newRaft(t, db, cert)
+	defer instance.Shutdown()
+
+	rafttest.WaitLeader(t, instance.Raft(), time.Second)
+	assert.Equal(t, raft.Leader, instance.Raft().State())
+}
+
+// If there is more than one node, the raft object is created with
+// cluster-compatible parameters.
+func TestRaftFactory_MultiNode(t *testing.T) {
+	cert := shared.TestingKeyPair()
+
+	leader := ""
+	for i := 0; i < 2; i++ {
+		db, cleanup := db.NewTestNode(t)
+		defer cleanup()
+
+		mux := http.NewServeMux()
+		server := newServer(cert, mux)
+		defer server.Close()
+
+		address := server.Listener.Addr().String()
+		setRaftRole(t, db, address)
+
+		instance := newRaft(t, db, cert)
+		defer instance.Shutdown()
+		if i == 0 {
+			leader = address
+			rafttest.WaitLeader(t, instance.Raft(), time.Second)
+		}
+
+		mux.HandleFunc("/internal/raft", instance.HandlerFunc())
+
+		if i > 0 {
+			id := raft.ServerID(strconv.Itoa(i + 1))
+			target := raft.ServerAddress(leader)
+			err := instance.MembershipChanger().Join(id, target, 5*time.Second)
+			require.NoError(t, err)
+		}
+	}
+}
+
+// Create a new test RaftInstance.
+func newRaft(t *testing.T, db *db.Node, cert *shared.CertInfo) *cluster.RaftInstance {
+	logging.Testing(t)
+	instance, err := cluster.NewRaft(db, cert, 0.2)
+	require.NoError(t, err)
+	return instance
+}
+
+// Set the core.https_address config key to the given address, and insert the
+// address into the raft_nodes table.
+//
+// This effectively makes the node act as a database raft node.
+func setRaftRole(t *testing.T, database *db.Node, address string) {
+	require.NoError(t, database.Transaction(func(tx *db.NodeTx) error {
+		err := tx.UpdateConfig(map[string]string{"core.https_address": address})
+		if err != nil {
+			return err
+		}
+		_, err = tx.RaftNodeAdd(address)
+		return err
+	}))
+}
+
+// Create a new test HTTP server configured with the given TLS certificate and
+// using the given handler.
+func newServer(cert *shared.CertInfo, handler http.Handler) *httptest.Server {
+	server := httptest.NewUnstartedServer(handler)
+	server.TLS = util.ServerTLSConfig(cert)
+	server.StartTLS()
+	return server
+}
diff --git a/lxd/cluster/tls.go b/lxd/cluster/tls.go
new file mode 100644
index 000000000..aa9b75731
--- /dev/null
+++ b/lxd/cluster/tls.go
@@ -0,0 +1,35 @@
+package cluster
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+
+	"github.com/lxc/lxd/shared"
+)
+
+// Return a TLS configuration suitable for establishing inter-node network
+// connections using the cluster certificate.
+func tlsClientConfig(info *shared.CertInfo) (*tls.Config, error) {
+	keypair := info.KeyPair()
+	ca := info.CA()
+	config := shared.InitTLSConfig()
+	config.Certificates = []tls.Certificate{keypair}
+	config.RootCAs = x509.NewCertPool()
+	if ca != nil {
+		config.RootCAs.AddCert(ca)
+	}
+	// Since the same cluster keypair is used both as server and as client
+	// cert, let's add it to the CA pool to make it trusted.
+	cert, err := x509.ParseCertificate(keypair.Certificate[0])
+	if err != nil {
+		return nil, err
+	}
+	cert.IsCA = true
+	cert.KeyUsage = x509.KeyUsageCertSign
+	config.RootCAs.AddCert(cert)
+
+	if cert.DNSNames != nil {
+		config.ServerName = cert.DNSNames[0]
+	}
+	return config, nil
+}
diff --git a/lxd/daemon.go b/lxd/daemon.go
index 59468c3e4..a78ec69ee 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -48,8 +48,9 @@ type Daemon struct {
 	db           *db.Node
 	maas         *maas.Controller
 	cluster      *db.Cluster
-	readyChan    chan bool
-	shutdownChan chan bool
+	setupChan    chan struct{} // Closed when basic Daemon setup is completed
+	readyChan    chan struct{} // Closed when LXD is fully ready
+	shutdownChan chan struct{}
 
 	// Tasks registry for long-running background tasks.
 	tasks task.Group
@@ -82,8 +83,11 @@ type DaemonConfig struct {
 // NewDaemon returns a new Daemon object with the given configuration.
 func NewDaemon(config *DaemonConfig, os *sys.OS) *Daemon {
 	return &Daemon{
-		config: config,
-		os:     os,
+		config:       config,
+		os:           os,
+		setupChan:    make(chan struct{}),
+		readyChan:    make(chan struct{}),
+		shutdownChan: make(chan struct{}),
 	}
 }
 
@@ -206,6 +210,10 @@ func (d *Daemon) createCmd(restAPI *mux.Router, version string, c Command) {
 	restAPI.HandleFunc(uri, func(w http.ResponseWriter, r *http.Request) {
 		w.Header().Set("Content-Type", "application/json")
 
+		// Block public API requests until we're done with basic
+		// initialization tasks, such as setting up the cluster database.
+		<-d.setupChan
+
 		untrustedOk := (r.Method == "GET" && c.untrustedGet) || (r.Method == "POST" && c.untrustedPost)
 		err := d.checkTrustedClient(r)
 		if err == nil {
@@ -339,10 +347,6 @@ func (d *Daemon) Init() error {
 }
 
 func (d *Daemon) init() error {
-	/* Initialize some variables */
-	d.readyChan = make(chan bool)
-	d.shutdownChan = make(chan bool)
-
 	/* Set the LVM environment */
 	err := os.Setenv("LVM_SUPPRESS_FD_WARNINGS", "1")
 	if err != nil {
@@ -409,6 +413,20 @@ func (d *Daemon) init() error {
 		return errors.Wrap(err, "failed to open cluster database")
 	}
 
+	/* Setup the web server */
+	config := &endpoints.Config{
+		Dir:                  d.os.VarDir,
+		Cert:                 certInfo,
+		RestServer:           RestServer(d),
+		DevLxdServer:         DevLxdServer(d),
+		LocalUnixSocketGroup: d.config.Group,
+		NetworkAddress:       address,
+	}
+	d.endpoints, err = endpoints.Up(config)
+	if err != nil {
+		return err
+	}
+
 	/* Read the storage pools */
 	err = SetupStorageDriver(d.State(), false)
 	if err != nil {
@@ -462,19 +480,7 @@ func (d *Daemon) init() error {
 		return err
 	}
 
-	/* Setup the web server */
-	config := &endpoints.Config{
-		Dir:                  d.os.VarDir,
-		Cert:                 certInfo,
-		RestServer:           RestServer(d),
-		DevLxdServer:         DevLxdServer(d),
-		LocalUnixSocketGroup: d.config.Group,
-		NetworkAddress:       address,
-	}
-	d.endpoints, err = endpoints.Up(config)
-	if err != nil {
-		return fmt.Errorf("cannot start API endpoints: %v", err)
-	}
+	close(d.setupChan)
 
 	// Run the post initialization actions
 	err = d.Ready()
@@ -554,17 +560,10 @@ func (d *Daemon) Stop() error {
 
 	trackError(d.tasks.Stop(time.Second)) // Give tasks at most a second to cleanup.
 
+	shouldUnmount := false
 	if d.db != nil {
 		if n, err := d.numRunningContainers(); err != nil || n == 0 {
-			logger.Infof("Unmounting temporary filesystems")
-
-			syscall.Unmount(shared.VarPath("devlxd"), syscall.MNT_DETACH)
-			syscall.Unmount(shared.VarPath("shmounts"), syscall.MNT_DETACH)
-
-			logger.Infof("Done unmounting temporary filesystems")
-		} else {
-			logger.Debugf(
-				"Not unmounting temporary filesystems (containers are still running)")
+			shouldUnmount = true
 		}
 
 		logger.Infof("Closing the database")
@@ -580,6 +579,22 @@ func (d *Daemon) Stop() error {
 		trackError(d.endpoints.Down())
 	}
 
+	if d.endpoints != nil {
+		trackError(d.endpoints.Down())
+	}
+
+	if shouldUnmount {
+		logger.Infof("Unmounting temporary filesystems")
+
+		syscall.Unmount(shared.VarPath("devlxd"), syscall.MNT_DETACH)
+		syscall.Unmount(shared.VarPath("shmounts"), syscall.MNT_DETACH)
+
+		logger.Infof("Done unmounting temporary filesystems")
+	} else {
+		logger.Debugf(
+			"Not unmounting temporary filesystems (containers are still running)")
+	}
+
 	logger.Infof("Saving simplestreams cache")
 	trackError(imageSaveStreamCache(d.os))
 	logger.Infof("Saved simplestreams cache")
diff --git a/lxd/endpoints/network.go b/lxd/endpoints/network.go
index 01c169b0f..5da1bc573 100644
--- a/lxd/endpoints/network.go
+++ b/lxd/endpoints/network.go
@@ -2,7 +2,6 @@ package endpoints
 
 import (
 	"crypto/tls"
-	"crypto/x509"
 	"fmt"
 	"net"
 	"sync"
@@ -137,22 +136,10 @@ func (l *networkListener) Accept() (net.Conn, error) {
 
 // Config safely swaps the underlying TLS configuration.
 func (l *networkListener) Config(cert *shared.CertInfo) {
-	config := shared.InitTLSConfig()
-	config.ClientAuth = tls.RequestClientCert
-	config.Certificates = []tls.Certificate{cert.KeyPair()}
-
-	if cert.CA() != nil {
-		pool := x509.NewCertPool()
-		pool.AddCert(cert.CA())
-		config.RootCAs = pool
-		config.ClientCAs = pool
-
-		logger.Infof("LXD is in CA mode, only CA-signed certificates will be allowed")
-	}
-
-	config.BuildNameToCertificate()
+	config := util.ServerTLSConfig(cert)
 
 	l.mu.Lock()
 	defer l.mu.Unlock()
+
 	l.config = config
 }
diff --git a/lxd/main_test.go b/lxd/main_test.go
index 0f05ac700..5555e199e 100644
--- a/lxd/main_test.go
+++ b/lxd/main_test.go
@@ -62,8 +62,6 @@ func (suite *lxdTestSuite) SetupTest() {
 		suite.T().Fatalf("failed to start daemon: %v", err)
 	}
 
-	daemonConfigInit(suite.d.db.DB())
-
 	// Create default storage pool. Make sure that we don't pass a nil to
 	// the next function.
 	poolConfig := map[string]string{}
@@ -107,8 +105,11 @@ func (suite *lxdTestSuite) SetupTest() {
 }
 
 func (suite *lxdTestSuite) TearDownTest() {
-	suite.d.Stop()
-	err := os.RemoveAll(suite.tmpdir)
+	err := suite.d.Stop()
+	if err != nil {
+		suite.T().Fatalf("failed to stop daemon: %v", err)
+	}
+	err = os.RemoveAll(suite.tmpdir)
 	if err != nil {
 		suite.T().Fatalf("failed to remove temp dir: %v", err)
 	}
diff --git a/lxd/sys/fs.go b/lxd/sys/fs.go
index d3eff1edf..c8550fc3a 100644
--- a/lxd/sys/fs.go
+++ b/lxd/sys/fs.go
@@ -13,6 +13,7 @@ func (s *OS) initDirs() error {
 	}{
 		{s.VarDir, 0711},
 		{s.CacheDir, 0700},
+		{filepath.Join(s.VarDir, "raft"), 0700},
 		{filepath.Join(s.VarDir, "containers"), 0711},
 		{filepath.Join(s.VarDir, "devices"), 0711},
 		{filepath.Join(s.VarDir, "devlxd"), 0755},
diff --git a/lxd/util/net.go b/lxd/util/net.go
index 0e368b0f4..aa94af2a1 100644
--- a/lxd/util/net.go
+++ b/lxd/util/net.go
@@ -1,10 +1,13 @@
 package util
 
 import (
+	"crypto/tls"
+	"crypto/x509"
 	"fmt"
 	"net"
 
 	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/logger"
 )
 
 // InMemoryNetwork creates a fully in-memory listener and dial function.
@@ -79,3 +82,24 @@ func CanonicalNetworkAddress(address string) string {
 	}
 	return address
 }
+
+// ServerTLSConfig returns a new server-side tls.Config generated from the given
+// certificate info.
+func ServerTLSConfig(cert *shared.CertInfo) *tls.Config {
+	config := shared.InitTLSConfig()
+	config.ClientAuth = tls.RequestClientCert
+	config.Certificates = []tls.Certificate{cert.KeyPair()}
+	config.NextProtos = []string{"h2"} // Required by gRPC
+
+	if cert.CA() != nil {
+		pool := x509.NewCertPool()
+		pool.AddCert(cert.CA())
+		config.RootCAs = pool
+		config.ClientCAs = pool
+
+		logger.Infof("LXD is in CA mode, only CA-signed certificates will be allowed")
+	}
+
+	config.BuildNameToCertificate()
+	return config
+}

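To make the latency knob concrete: raftConfig multiplies each of hashicorp's
default raft timeouts by the given factor, so the 0.2 passed by the test suite
cuts them to a fifth. A standalone sketch of the same arithmetic (the printed
values assume hashicorp's current defaults):

    package main

    import (
            "fmt"
            "math"
            "time"

            "github.com/hashicorp/raft"
    )

    func main() {
            latency := 0.2
            config := raft.DefaultConfig()
            // Same scaling as raftConfig: multiply and round up.
            scaled := time.Duration(math.Ceil(float64(config.HeartbeatTimeout) * latency))
            fmt.Println(config.HeartbeatTimeout, "->", scaled) // e.g. 1s -> 200ms
    }
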
From 7feb54c77b374e2576435ffef49f48cc27714d2f Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Sat, 14 Oct 2017 11:39:18 +0000
Subject: [PATCH 017/227] Add actual dqlite backend to cluster.Gateway

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api.go                  |   4 +
 lxd/cluster/gateway.go      | 197 ++++++++++++++++++++++++++++++++++++++++++--
 lxd/cluster/gateway_test.go |  39 +++++++++
 lxd/cluster/raft.go         |   2 +-
 lxd/daemon.go               |   3 +-
 test/includes/lxd.sh        |  11 ++-
 6 files changed, 244 insertions(+), 12 deletions(-)

diff --git a/lxd/api.go b/lxd/api.go
index 94ba9c285..e038e76dc 100644
--- a/lxd/api.go
+++ b/lxd/api.go
@@ -21,6 +21,10 @@ func RestServer(d *Daemon) *http.Server {
 		SyncResponse(true, []string{"/1.0"}).Render(w)
 	})
 
+	for endpoint, f := range d.gateway.HandlerFuncs() {
+		mux.HandleFunc(endpoint, f)
+	}
+
 	for _, c := range api10 {
 		d.createCmd(mux, "1.0", c)
 	}
diff --git a/lxd/cluster/gateway.go b/lxd/cluster/gateway.go
index 10a560aca..1c8d65924 100644
--- a/lxd/cluster/gateway.go
+++ b/lxd/cluster/gateway.go
@@ -1,15 +1,22 @@
 package cluster
 
 import (
+	"fmt"
 	"net"
+	"net/http"
 	"time"
 
+	"github.com/CanonicalLtd/dqlite"
 	"github.com/CanonicalLtd/go-grpc-sql"
+	"github.com/hashicorp/raft"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
-	"github.com/mattn/go-sqlite3"
+	"github.com/lxc/lxd/shared/logger"
+	"github.com/pkg/errors"
+	"golang.org/x/net/context"
 	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials"
 )
 
 // NewGateway creates a new Gateway for managing access to the dqlite cluster.
@@ -45,6 +52,10 @@ type Gateway struct {
 	cert    *shared.CertInfo
 	latency float64
 
+	// The raft instance to use for creating the dqlite driver. It's nil if
+	// this LXD node is not supposed to be part of the raft cluster.
+	raft *raftInstance
+
 	// The gRPC server exposing the dqlite driver created by this
 	// gateway. It's nil if this LXD node is not supposed to be part of the
 	// raft cluster.
@@ -59,12 +70,77 @@ type Gateway struct {
 	memoryDial func() (*grpc.ClientConn, error)
 }
 
+// HandlerFuncs returns the HTTP handlers that should be added to the REST API
+// endpoint in order to handle database-related requests.
+//
+// There are two handlers, one for the /internal/raft endpoint and one for
+// the gRPC SQL endpoint (see grpcEndpoint below), which handle raft and
+// gRPC-SQL requests respectively.
+//
+// These handlers might return 404, either because this LXD node is a
+// non-clustered node not available over the network, or because it is not a
+// database node that is part of the dqlite cluster.
+func (g *Gateway) HandlerFuncs() map[string]http.HandlerFunc {
+	grpc := func(w http.ResponseWriter, r *http.Request) {
+		if g.server == nil || g.memoryDial != nil {
+			http.NotFound(w, r)
+			return
+		}
+
+		// Before actually establishing the gRPC SQL connection, our
+		// dialer probes the node to see if it's currently the leader
+		// (otherwise it tries another node or retries later).
+		if r.Method == "HEAD" {
+			if g.raft.Raft().State() != raft.Leader {
+				http.Error(w, "503 not leader", http.StatusServiceUnavailable)
+				return
+			}
+			return
+		}
+
+		g.server.ServeHTTP(w, r)
+	}
+	raft := func(w http.ResponseWriter, r *http.Request) {
+		if g.raft == nil || g.raft.HandlerFunc() == nil {
+			http.NotFound(w, r)
+			return
+		}
+		g.raft.HandlerFunc()(w, r)
+	}
+
+	return map[string]http.HandlerFunc{
+		grpcEndpoint: grpc,
+		raftEndpoint: raft,
+	}
+}
+
 // Dialer returns a gRPC dial function that can be used to connect to one of
 // the dqlite nodes via gRPC.
 func (g *Gateway) Dialer() grpcsql.Dialer {
 	return func() (*grpc.ClientConn, error) {
 		// Memory connection.
-		return g.memoryDial()
+		if g.memoryDial != nil {
+			return g.memoryDial()
+		}
+
+		// Network connection.
+		addresses, err := g.raftNodes()
+		if err != nil {
+			return nil, err
+		}
+
+		// FIXME: timeout should be configurable
+		remaining := 10 * time.Second
+		for remaining > 0 {
+			for _, address := range addresses {
+				var conn *grpc.ClientConn
+				conn, err = grpcNetworkDial(address, g.cert, time.Second)
+				if err == nil {
+					return conn, nil
+				}
+			}
+			time.Sleep(250 * time.Millisecond)
+			remaining -= 250 * time.Millisecond
+		}
+		return nil, err
 	}
 }
 
@@ -76,19 +152,96 @@ func (g *Gateway) Shutdown() error {
 		// switching between in-memory and network mode.
 		g.memoryDial = nil
 	}
-	return nil
+	if g.raft == nil {
+		return nil
+	}
+	return g.raft.Shutdown()
 }
 
 // Initialize the gateway, creating a new raft factory and gRPC server (if this
 // node is a database node), and a gRPC dialer.
 func (g *Gateway) init() error {
-	g.server = grpcsql.NewServer(&sqlite3.SQLiteDriver{})
-	listener, dial := util.InMemoryNetwork()
-	go g.server.Serve(listener)
-	g.memoryDial = grpcMemoryDial(dial)
+	raft, err := newRaft(g.db, g.cert, g.latency)
+	if err != nil {
+		return errors.Wrap(err, "failed to create raft factory")
+	}
+
+	// If the resulting raft instance is not nil, it means that this node
+	// should serve as a database node, so create a dqlite driver to be
+	// exposed over gRPC.
+	if raft != nil {
+		driver, err := dqlite.NewDriver(raft.FSM(), raft.Raft(), dqlite.LogFunc(dqliteLog))
+		if err != nil {
+			return errors.Wrap(err, "failed to create dqlite driver")
+		}
+		server := grpcsql.NewServer(driver)
+		if raft.HandlerFunc() == nil {
+			// If no raft http handler is set, it means we are in
+			// single node mode and we don't have a network
+			// endpoint, so let's spin up a fully in-memory gRPC
+			// server.
+			listener, dial := util.InMemoryNetwork()
+			go server.Serve(listener)
+			g.memoryDial = grpcMemoryDial(dial)
+		}
+
+		g.server = server
+		g.raft = raft
+	}
 	return nil
 }
 
+// Wait for the raft node to become leader. Should only be used by Bootstrap,
+// since we know that we'll self-elect.
+func (g *Gateway) waitLeadership() error {
+	for i := 0; i < 20; i++ {
+		if g.raft.raft.State() == raft.Leader {
+			return nil
+		}
+		time.Sleep(250 * time.Millisecond)
+	}
+	return fmt.Errorf("raft node did not self-elect within 5 seconds")
+}
+
+// Return the addresses of the current raft nodes.
+func (g *Gateway) raftNodes() ([]string, error) {
+	var addresses []string
+	err := g.db.Transaction(func(tx *db.NodeTx) error {
+		var err error
+		addresses, err = tx.RaftNodeAddresses()
+		return err
+	})
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to fetch raft nodes")
+	}
+	return addresses, nil
+}
+
+func grpcNetworkDial(addr string, cert *shared.CertInfo, t time.Duration) (*grpc.ClientConn, error) {
+	config, err := tlsClientConfig(cert)
+	if err != nil {
+		return nil, err
+	}
+
+	// Make a probe HEAD request to check if the target node is the leader.
+	url := fmt.Sprintf("https://%s%s", addr, grpcEndpoint)
+	client := &http.Client{Transport: &http.Transport{TLSClientConfig: config}}
+	response, err := client.Head(url)
+	if err != nil {
+		return nil, err
+	}
+	if response.StatusCode != http.StatusOK {
+		return nil, fmt.Errorf(response.Status)
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), t)
+	defer cancel()
+	options := []grpc.DialOption{
+		grpc.WithTransportCredentials(credentials.NewTLS(config)),
+	}
+	return grpc.DialContext(ctx, addr, options...)
+}
+
 // Convert a raw in-memory dial function into a gRPC one.
 func grpcMemoryDial(dial func() net.Conn) func() (*grpc.ClientConn, error) {
 	options := []grpc.DialOption{
@@ -102,3 +255,33 @@ func grpcMemoryDial(dial func() net.Conn) func() (*grpc.ClientConn, error) {
 		return grpc.Dial("", options...)
 	}
 }
+
+// The LXD API endpoint path that gets routed to a gRPC server handler for
+// performing SQL queries against the dqlite driver running on this node.
+//
+// FIXME: figure out if there's a way to configure the gRPC client to add a
+//        prefix to this url, e.g. /internal/db/protocol.SQL/Conn.
+const grpcEndpoint = "/protocol.SQL/Conn"
+
+// Redirect dqlite's logs to our own logger
+func dqliteLog(level, message string) {
+	if level == "TRACE" {
+		// Ignore TRACE level.
+		//
+		// TODO: lxd has no TRACE level, while dqlite's TRACE is quite
+		//       verbose; we'll need to take this level into account if
+		//       we ever need to do some deep debugging.
+		return
+	}
+
+	switch level {
+	case "DEBUG":
+		logger.Debug(message)
+	case "INFO":
+		logger.Info(message)
+	case "WARN":
+		logger.Warn(message)
+	default:
+		// Ignore any other log level.
+	}
+}
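
The HEAD probe handled above gives clients a cheap way to find the leader
before committing to a gRPC SQL connection. A hypothetical client-side helper
showing the shape of that probe (probeLeader is not part of the patch; it
assumes a tls.Config built the way tlsClientConfig builds one):

    package main

    import (
            "crypto/tls"
            "fmt"
            "net/http"
    )

    // probeLeader reports whether the LXD node at addr currently answers 200
    // on the gRPC SQL endpoint, i.e. whether it believes it is the dqlite
    // leader.
    func probeLeader(addr string, config *tls.Config) (bool, error) {
            client := &http.Client{Transport: &http.Transport{TLSClientConfig: config}}
            response, err := client.Head(fmt.Sprintf("https://%s/protocol.SQL/Conn", addr))
            if err != nil {
                    return false, err
            }
            response.Body.Close()
            return response.StatusCode == http.StatusOK, nil
    }

    func main() {
            // Sketch only: real code would use the cluster TLS config.
            ok, err := probeLeader("10.0.0.1:8443", &tls.Config{InsecureSkipVerify: true})
            fmt.Println(ok, err)
    }
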
diff --git a/lxd/cluster/gateway_test.go b/lxd/cluster/gateway_test.go
index 33072e993..cb5c500e2 100644
--- a/lxd/cluster/gateway_test.go
+++ b/lxd/cluster/gateway_test.go
@@ -1,10 +1,13 @@
 package cluster_test
 
 import (
+	"net/http"
+	"net/http/httptest"
 	"os"
 	"path/filepath"
 	"testing"
 
+	grpcsql "github.com/CanonicalLtd/go-grpc-sql"
 	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/shared"
@@ -23,12 +26,48 @@ func TestGateway_Single(t *testing.T) {
 	gateway := newGateway(t, db, cert)
 	defer gateway.Shutdown()
 
+	handlerFuncs := gateway.HandlerFuncs()
+	assert.Len(t, handlerFuncs, 2)
+	for endpoint, f := range handlerFuncs {
+		w := httptest.NewRecorder()
+		r := &http.Request{}
+		f(w, r)
+		assert.Equal(t, 404, w.Code, endpoint)
+	}
+
 	dialer := gateway.Dialer()
 	conn, err := dialer()
 	assert.NoError(t, err)
 	assert.NotNil(t, conn)
 }
 
+// If there's a network address configured, we expose the gRPC endpoint with
+// an HTTP handler.
+func TestGateway_SingleWithNetworkAddress(t *testing.T) {
+	db, cleanup := db.NewTestNode(t)
+	defer cleanup()
+
+	cert := shared.TestingKeyPair()
+	mux := http.NewServeMux()
+	server := newServer(cert, mux)
+	defer server.Close()
+
+	address := server.Listener.Addr().String()
+	setRaftRole(t, db, address)
+
+	gateway := newGateway(t, db, cert)
+	defer gateway.Shutdown()
+
+	for path, handler := range gateway.HandlerFuncs() {
+		mux.HandleFunc(path, handler)
+	}
+
+	driver := grpcsql.NewDriver(gateway.Dialer())
+	conn, err := driver.Open("test.db")
+	require.NoError(t, err)
+	require.NoError(t, conn.Close())
+}
+
 // Create a new test Gateway with the given parameters, and ensure no error
 // happens.
 func newGateway(t *testing.T, db *db.Node, certInfo *shared.CertInfo) *cluster.Gateway {
diff --git a/lxd/cluster/raft.go b/lxd/cluster/raft.go
index 0b24ff8b9..7db15baf9 100644
--- a/lxd/cluster/raft.go
+++ b/lxd/cluster/raft.go
@@ -24,9 +24,9 @@ import (
 	"github.com/lxc/lxd/lxd/node"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/log15"
 	"github.com/lxc/lxd/shared/logger"
 	"github.com/pkg/errors"
-	log15 "gopkg.in/inconshreveable/log15.v2"
 )
 
 // Create a raft instance and all its dependencies, to be used as backend for
diff --git a/lxd/daemon.go b/lxd/daemon.go
index a78ec69ee..ac61447be 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -407,8 +407,7 @@ func (d *Daemon) init() error {
 	address := daemonConfig["core.https_address"].Get()
 
 	/* Open the cluster database */
-	clusterFilename := filepath.Join(d.os.VarDir, "db.bin")
-	d.cluster, err = db.OpenCluster(clusterFilename, d.gateway.Dialer(), address)
+	d.cluster, err = db.OpenCluster("db.bin", d.gateway.Dialer(), address)
 	if err != nil {
 		return errors.Wrap(err, "failed to open cluster database")
 	}
diff --git a/test/includes/lxd.sh b/test/includes/lxd.sh
index 7c80ca8e8..89399b6df 100644
--- a/test/includes/lxd.sh
+++ b/test/includes/lxd.sh
@@ -14,6 +14,9 @@ spawn_lxd() {
     storage=${1}
     shift
 
+    # Link to local sqlite with replication patch for dqlite
+    sqlite="$(pwd)/../lxd/sqlite/.libs"
+
     # shellcheck disable=SC2153
     if [ "$LXD_BACKEND" = "random" ]; then
         lxd_backend="$(random_storage_backend)"
@@ -36,7 +39,8 @@ spawn_lxd() {
 
     echo "==> Spawning lxd in ${lxddir}"
     # shellcheck disable=SC2086
-    LXD_DIR="${lxddir}" lxd --logfile "${lxddir}/lxd.log" ${DEBUG-} "$@" 2>&1 &
+    LD_LIBRARY_PATH="${sqlite}" LXD_DIR="${lxddir}" lxd --logfile "${lxddir}/lxd.log" ${DEBUG-} "$@" 2>&1 &
     LXD_PID=$!
     echo "${LXD_PID}" > "${lxddir}/lxd.pid"
     # shellcheck disable=SC2153
@@ -82,9 +86,12 @@ respawn_lxd() {
     lxddir=${1}
     shift
 
+    # Link to local sqlite with replication patch for dqlite
+    sqlite="$(pwd)/../lxd/sqlite/.libs"
+
     echo "==> Spawning lxd in ${lxddir}"
     # shellcheck disable=SC2086
-    LXD_DIR="${lxddir}" lxd --logfile "${lxddir}/lxd.log" ${DEBUG-} "$@" 2>&1 &
+    LD_LIBRARY_PATH="${sqlite}" LXD_DIR="${lxddir}" lxd --logfile "${lxddir}/lxd.log" ${DEBUG-} "$@" 2>&1 &
     LXD_PID=$!
     echo "${LXD_PID}" > "${lxddir}/lxd.pid"
     echo "==> Spawned LXD (PID is ${LXD_PID})"

From 0c1bd853642169bc5852059d3d077f257b97fe4c Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Sat, 14 Oct 2017 17:54:57 +0000
Subject: [PATCH 018/227] Add APIs to modify the cluster database nodes table

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/node.go      | 50 ++++++++++++++++++++++++++++++++++++++++++++++++++
 lxd/db/node_test.go | 31 +++++++++++++++++++++++++++++++
 2 files changed, 81 insertions(+)
 create mode 100644 lxd/db/node.go
 create mode 100644 lxd/db/node_test.go

diff --git a/lxd/db/node.go b/lxd/db/node.go
new file mode 100644
index 000000000..ca02779c1
--- /dev/null
+++ b/lxd/db/node.go
@@ -0,0 +1,50 @@
+package db
+
+import (
+	"github.com/lxc/lxd/lxd/db/cluster"
+	"github.com/lxc/lxd/lxd/db/query"
+	"github.com/lxc/lxd/shared/version"
+	"github.com/pkg/errors"
+)
+
+// NodeInfo holds information about a single LXD instance in a cluster.
+type NodeInfo struct {
+	ID            int64  // Stable node identifier
+	Name          string // User-assigned name of the node
+	Address       string // Network address of the node
+	Description   string // Node description (optional)
+	Schema        int    // Schema version of the LXD code running the node
+	APIExtensions int    // Number of API extensions of the LXD code running on the node
+}
+
+// Nodes returns all LXD nodes part of the cluster.
+//
+// If this LXD instance is not clustered, an empty list is returned.
+func (c *ClusterTx) Nodes() ([]NodeInfo, error) {
+	nodes := []NodeInfo{}
+	dest := func(i int) []interface{} {
+		nodes = append(nodes, NodeInfo{})
+		return []interface{}{
+			&nodes[i].ID,
+			&nodes[i].Name,
+			&nodes[i].Address,
+			&nodes[i].Description,
+			&nodes[i].Schema,
+			&nodes[i].APIExtensions,
+		}
+	}
+	stmt := "SELECT id, name, address, description, schema, api_extensions FROM nodes ORDER BY id"
+	err := query.SelectObjects(c.tx, dest, stmt)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to fecth nodes")
+	}
+	return nodes, nil
+}
+
+// NodeAdd adds a node to the current list of LXD nodes that are part of the
+// cluster. It returns the ID of the newly inserted row.
+func (c *ClusterTx) NodeAdd(name string, address string) (int64, error) {
+	columns := []string{"name", "address", "schema", "api_extensions"}
+	values := []interface{}{name, address, cluster.SchemaVersion, len(version.APIExtensions)}
+	return query.UpsertObject(c.tx, "nodes", columns, values)
+}
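
A hypothetical usage sketch for the two new APIs (the helper below is
illustrative, not part of the patch): running both calls inside a single
ClusterTx keeps the insert and the read-back atomic.

    package example

    import (
            "fmt"

            "github.com/lxc/lxd/lxd/db"
    )

    // addNode registers a node and reads back the membership list in one
    // cluster transaction.
    func addNode(clusterDB *db.Cluster, name, address string) error {
            return clusterDB.Transaction(func(tx *db.ClusterTx) error {
                    id, err := tx.NodeAdd(name, address)
                    if err != nil {
                            return err
                    }
                    nodes, err := tx.Nodes()
                    if err != nil {
                            return err
                    }
                    fmt.Printf("added node %d, cluster now has %d node(s)\n", id, len(nodes))
                    return nil
            })
    }
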
diff --git a/lxd/db/node_test.go b/lxd/db/node_test.go
new file mode 100644
index 000000000..82d3af111
--- /dev/null
+++ b/lxd/db/node_test.go
@@ -0,0 +1,31 @@
+package db_test
+
+import (
+	"testing"
+
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/db/cluster"
+	"github.com/lxc/lxd/shared/version"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// Add a new cluster node.
+func TestNodeAdd(t *testing.T) {
+	tx, cleanup := db.NewTestClusterTx(t)
+	defer cleanup()
+
+	id, err := tx.NodeAdd("buzz", "1.2.3.4:666")
+	require.NoError(t, err)
+	assert.Equal(t, int64(1), id)
+
+	nodes, err := tx.Nodes()
+	require.NoError(t, err)
+	require.Len(t, nodes, 1)
+
+	node := nodes[0]
+	assert.Equal(t, "buzz", node.Name)
+	assert.Equal(t, "1.2.3.4:666", node.Address)
+	assert.Equal(t, cluster.SchemaVersion, node.Schema)
+	assert.Equal(t, len(version.APIExtensions), node.APIExtensions)
+}

From 682b1d5c40a530356f2b0ef4565d31d166266f64 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Sat, 14 Oct 2017 20:55:25 +0000
Subject: [PATCH 019/227] Conditionally load the server or cluster certificate

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/daemon.go          |  2 +-
 lxd/util/encryption.go | 19 +++++++++++++++++++
 2 files changed, 20 insertions(+), 1 deletion(-)

diff --git a/lxd/daemon.go b/lxd/daemon.go
index ac61447be..e13675cdf 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -381,7 +381,7 @@ func (d *Daemon) init() error {
 	}
 
 	/* Setup server certificate */
-	certInfo, err := shared.KeyPairAndCA(d.os.VarDir, "server", shared.CertServer)
+	certInfo, err := util.LoadCert(d.os.VarDir)
 	if err != nil {
 		return err
 	}
diff --git a/lxd/util/encryption.go b/lxd/util/encryption.go
index a015bf514..43e7aecaf 100644
--- a/lxd/util/encryption.go
+++ b/lxd/util/encryption.go
@@ -4,6 +4,10 @@ import (
 	"bytes"
 	"encoding/hex"
 	"fmt"
+	"path/filepath"
+
+	"github.com/lxc/lxd/shared"
+	"github.com/pkg/errors"
 
 	"golang.org/x/crypto/scrypt"
 )
@@ -32,3 +36,18 @@ func PasswordCheck(secret, password string) error {
 
 	return nil
 }
+
+// LoadCert reads the LXD server certificate from the given var dir.
+//
+// If a cluster certificate is found it will be loaded instead.
+func LoadCert(dir string) (*shared.CertInfo, error) {
+	prefix := "server"
+	if shared.PathExists(filepath.Join(dir, "cluster.crt")) {
+		prefix = "cluster"
+	}
+	cert, err := shared.KeyPairAndCA(dir, prefix, shared.CertServer)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to load TLS certificate")
+	}
+	return cert, nil
+}

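A short usage sketch (hypothetical var dir): callers no longer need to know
which identity the daemon is running under, since LoadCert picks the right
keypair based on what is on disk.

    package main

    import (
            "log"

            "github.com/lxc/lxd/lxd/util"
    )

    func main() {
            // If cluster.crt exists in the var dir (a symlink created when
            // the node bootstraps or joins a cluster, see the later
            // membership patches), the cluster keypair is loaded; otherwise
            // the plain server keypair is used.
            cert, err := util.LoadCert("/var/lib/lxd")
            if err != nil {
                    log.Fatal(err)
            }
            _ = cert // handed to endpoints.Config and cluster.NewGateway
    }
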
From ce2148d1026bf7e4a6c53c5c861c0e89350e7d95 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 16 Oct 2017 12:23:07 +0000
Subject: [PATCH 020/227] Make NewTestOS also setup the testing certificates

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/daemon_integration_test.go | 48 ++++--------------------------------------
 lxd/devlxd_test.go             |  8 +++----
 lxd/main_test.go               |  3 ++-
 lxd/sys/testing.go             | 29 +++++++++++++++++++++++++
 4 files changed, 39 insertions(+), 49 deletions(-)

diff --git a/lxd/daemon_integration_test.go b/lxd/daemon_integration_test.go
index 0f689dfa5..f18c0e78c 100644
--- a/lxd/daemon_integration_test.go
+++ b/lxd/daemon_integration_test.go
@@ -1,9 +1,6 @@
 package main
 
 import (
-	"io/ioutil"
-	"os"
-	"path/filepath"
 	"testing"
 
 	lxd "github.com/lxc/lxd/client"
@@ -32,21 +29,16 @@ func newDaemon(t *testing.T) (*Daemon, func()) {
 	// Logging
 	resetLogger := logging.Testing(t)
 
-	// Test directory
-	dir, err := ioutil.TempDir("", "lxd-integration-test")
-	require.NoError(t, err)
-
-	// Test certificates
-	require.NoError(t, os.Mkdir(filepath.Join(dir, "var"), 0755))
-	require.NoError(t, setupTestCerts(filepath.Join(dir, "var")))
+	// OS
+	os, osCleanup := sys.NewTestOS(t)
 
 	// Daemon
-	daemon := NewDaemon(newConfig(), newOS(dir))
+	daemon := NewDaemon(newConfig(), os)
 	require.NoError(t, daemon.Init())
 
 	cleanup := func() {
 		require.NoError(t, daemon.Stop())
-		require.NoError(t, os.RemoveAll(dir))
+		osCleanup()
 		resetLogger()
 	}
 
@@ -59,35 +51,3 @@ func newConfig() *DaemonConfig {
 		RaftLatency: 0.2,
 	}
 }
-
-// Create a new sys.OS object for testing purposes.
-func newOS(dir string) *sys.OS {
-	return &sys.OS{
-		// FIXME: setting mock mode can be avoided once daemon tasks
-		// are fixed to exit gracefully. See daemon.go.
-		MockMode: true,
-
-		VarDir:   filepath.Join(dir, "var"),
-		CacheDir: filepath.Join(dir, "cache"),
-		LogDir:   filepath.Join(dir, "log"),
-	}
-}
-
-// Populate the given test LXD directory with server certificates.
-//
-// Since generating certificates is CPU intensive, they will be simply
-// symlink'ed from the test/deps/ directory.
-func setupTestCerts(dir string) error {
-	cwd, err := os.Getwd()
-	if err != nil {
-		return err
-	}
-	deps := filepath.Join(cwd, "..", "test", "deps")
-	for _, f := range []string{"server.crt", "server.key"} {
-		err := os.Symlink(filepath.Join(deps, f), filepath.Join(dir, f))
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
diff --git a/lxd/devlxd_test.go b/lxd/devlxd_test.go
index 6a029859e..b3978e1a5 100644
--- a/lxd/devlxd_test.go
+++ b/lxd/devlxd_test.go
@@ -9,6 +9,8 @@ import (
 	"path/filepath"
 	"strings"
 	"testing"
+
+	"github.com/lxc/lxd/lxd/sys"
 )
 
 var testDir string
@@ -38,7 +40,7 @@ func setupDir() error {
 	if err != nil {
 		return err
 	}
-	err = setupTestCerts(testDir)
+	err = sys.SetupTestCerts(testDir)
 	if err != nil {
 		return err
 	}
@@ -129,9 +131,7 @@ func TestCredsSendRecv(t *testing.T) {
  * point where it realizes the pid isn't in a container without crashing).
  */
 func TestHttpRequest(t *testing.T) {
-	if err := setupDir(); err != nil {
-		t.Fatal(err)
-	}
+	if err := setupDir(); err != nil {
+		t.Fatal(err)
+	}
 	defer os.RemoveAll(testDir)
 
 	d := DefaultDaemon()
diff --git a/lxd/main_test.go b/lxd/main_test.go
index 5555e199e..2c1acfd54 100644
--- a/lxd/main_test.go
+++ b/lxd/main_test.go
@@ -6,6 +6,7 @@ import (
 	"os"
 
 	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/sys"
 	"github.com/stretchr/testify/require"
 	"github.com/stretchr/testify/suite"
 
@@ -20,7 +21,7 @@ func mockStartDaemon() (*Daemon, error) {
 	// Setup test certificates. We re-use the ones already on disk under
 	// the test/ directory, to avoid generating new ones, which is
 	// expensive.
-	err := setupTestCerts(shared.VarPath())
+	err := sys.SetupTestCerts(shared.VarPath())
 	if err != nil {
 		return nil, err
 	}
diff --git a/lxd/sys/testing.go b/lxd/sys/testing.go
index b0bb8a42a..537fd1c77 100644
--- a/lxd/sys/testing.go
+++ b/lxd/sys/testing.go
@@ -4,6 +4,7 @@ import (
 	"io/ioutil"
 	"os"
 	"path/filepath"
+	"runtime"
 	"testing"
 
 	"github.com/stretchr/testify/require"
@@ -13,16 +14,44 @@ import (
 func NewTestOS(t *testing.T) (*OS, func()) {
 	dir, err := ioutil.TempDir("", "lxd-sys-os-test-")
 	require.NoError(t, err)
+	require.NoError(t, SetupTestCerts(dir))
 
 	cleanup := func() {
 		require.NoError(t, os.RemoveAll(dir))
 	}
 
 	os := &OS{
+		// FIXME: setting mock mode can be avoided once daemon tasks
+		// are fixed to exit gracefully. See daemon.go.
+		MockMode: true,
+
 		VarDir:   dir,
 		CacheDir: filepath.Join(dir, "cache"),
 		LogDir:   filepath.Join(dir, "log"),
 	}
 
+	require.NoError(t, os.Init())
+
 	return os, cleanup
 }
+
+// SetupTestCerts populates the given test LXD directory with server
+// certificates.
+//
+// Since generating certificates is CPU intensive, they are simply
+// symlinked from the test/deps/ directory.
+//
+// FIXME: this function is exported because some tests use it
+//        directly. Eventually we should rework those tests to use NewTestOS
+//        instead.
+func SetupTestCerts(dir string) error {
+	_, filename, _, _ := runtime.Caller(0)
+	deps := filepath.Join(filepath.Dir(filename), "..", "..", "test", "deps")
+	for _, f := range []string{"server.crt", "server.key"} {
+		err := os.Symlink(filepath.Join(deps, f), filepath.Join(dir, f))
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}

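The runtime.Caller trick in SetupTestCerts deserves a note: it resolves
test/deps relative to the source file of the package rather than the process
working directory, so tests find the canned certificates no matter where
"go test" is invoked from. A standalone sketch of the pattern:

    package main

    import (
            "fmt"
            "path/filepath"
            "runtime"
    )

    // sourceDir returns the directory containing this source file,
    // independent of the process working directory.
    func sourceDir() string {
            _, filename, _, _ := runtime.Caller(0)
            return filepath.Dir(filename)
    }

    func main() {
            deps := filepath.Join(sourceDir(), "..", "..", "test", "deps")
            fmt.Println(deps)
    }
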
From b083c8859a6e48cc06c7cc597e16c07369fb0cb4 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 12 Oct 2017 18:21:52 +0000
Subject: [PATCH 021/227] Add cluster.Bootstrap

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/membership.go      | 166 +++++++++++++++++++++++++++++++++++++++++
 lxd/cluster/membership_test.go | 163 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 329 insertions(+)
 create mode 100644 lxd/cluster/membership.go
 create mode 100644 lxd/cluster/membership_test.go

diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
new file mode 100644
index 000000000..89e847e5a
--- /dev/null
+++ b/lxd/cluster/membership.go
@@ -0,0 +1,166 @@
+package cluster
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/node"
+	"github.com/lxc/lxd/lxd/state"
+	"github.com/lxc/lxd/shared"
+	"github.com/pkg/errors"
+)
+
+// Bootstrap turns a non-clustered LXD instance into the first (and leader)
+// node of a new LXD cluster.
+//
+// This instance must already have its core.https_address set and be listening
+// on the associated network address.
+func Bootstrap(state *state.State, gateway *Gateway, name string) error {
+	// Check parameters
+	if name == "" {
+		return fmt.Errorf("node name must not be empty")
+	}
+
+	// Sanity check that there's no leftover cluster certificate
+	for _, basename := range []string{"cluster.crt", "cluster.key", "cluster.ca"} {
+		if shared.PathExists(filepath.Join(state.OS.VarDir, basename)) {
+			return fmt.Errorf("inconsistent state: found leftover cluster certificate")
+		}
+	}
+
+	var address string
+	err := state.Node.Transaction(func(tx *db.NodeTx) error {
+		// Fetch current network address and raft nodes
+		config, err := node.ConfigLoad(tx)
+		if err != nil {
+			return errors.Wrap(err, "failed to fetch node configuration")
+		}
+		address = config.HTTPSAddress()
+
+		// Make sure node-local database state is in order.
+		err = membershipCheckNodeStateForBootstrapOrJoin(tx, address)
+		if err != nil {
+			return err
+		}
+
+		// Add ourselves as first raft node
+		err = tx.RaftNodeFirst(address)
+		if err != nil {
+			return errors.Wrap(err, "failed to insert first raft node")
+		}
+
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
+	// Insert ourselves into the nodes table.
+	err = state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		// Make sure cluster database state is in order.
+		err := membershipCheckClusterStateForBootstrapOrJoin(tx)
+		if err != nil {
+			return err
+		}
+
+		// Add ourselves to the nodes table.
+		_, err = tx.NodeAdd(name, address)
+		if err != nil {
+			return errors.Wrap(err, "failed to insert cluster node")
+		}
+
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
+	// Shutdown the gateway. This will trash any gRPC SQL connection
+	// against our in-memory dqlite driver and shutdown the associated raft
+	// instance.
+	err = gateway.Shutdown()
+	if err != nil {
+		return errors.Wrap(err, "failed to shutdown gRPC SQL gateway")
+	}
+
+	// Re-initialize the gateway. This will create a new raft factory and
+	// dqlite driver instance, which will be exposed over gRPC by the
+	// gateway handlers.
+	err = gateway.init()
+	if err != nil {
+		return errors.Wrap(err, "failed to re-initialize gRPC SQL gateway")
+	}
+	err = gateway.waitLeadership()
+	if err != nil {
+		return err
+	}
+
+	// The cluster certificates are symlinks against the regular node
+	// certificate.
+	for _, ext := range []string{".crt", ".key", ".ca"} {
+		if ext == ".ca" && !shared.PathExists(filepath.Join(state.OS.VarDir, "server.ca")) {
+			continue
+		}
+		err := os.Symlink("server"+ext, filepath.Join(state.OS.VarDir, "cluster"+ext))
+		if err != nil {
+			return errors.Wrap(err, "failed to create cluster cert symlink")
+		}
+	}
+
+	// Make sure we can actually connect to the cluster database through
+	// the network endpoint. This also makes the Go SQL pooling system
+	// invalidate the old connection, so new queries will be executed over
+	// the new gRPC network connection.
+	err = state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		_, err := tx.Nodes()
+		return err
+	})
+	if err != nil {
+		return errors.Wrap(err, "cluster database initialization failed")
+	}
+
+	return nil
+}
+
+// Check that node-related preconditions are met for bootstrapping or joining a
+// cluster.
+func membershipCheckNodeStateForBootstrapOrJoin(tx *db.NodeTx, address string) error {
+	nodes, err := tx.RaftNodes()
+	if err != nil {
+		return errors.Wrap(err, "failed to fetch current raft nodes")
+	}
+
+	hasNetworkAddress := address != ""
+	hasRaftNodes := len(nodes) > 0
+
+	// Sanity check that we're not in an inconsistent situation, where no
+	// network address is set, but there are still entries in the
+	// raft_nodes table.
+	if !hasNetworkAddress && hasRaftNodes {
+		return fmt.Errorf("inconsistent state: found leftover entries in raft_nodes")
+	}
+
+	if !hasNetworkAddress {
+		return fmt.Errorf("no core.https_address config is set on this node")
+	}
+	if hasRaftNodes {
+		return fmt.Errorf("the node is already part of a cluster")
+	}
+
+	return nil
+}
+
+// Check that cluster-related preconditions are met for bootstrapping or
+// joining a cluster.
+func membershipCheckClusterStateForBootstrapOrJoin(tx *db.ClusterTx) error {
+	nodes, err := tx.Nodes()
+	if err != nil {
+		return errors.Wrap(err, "failed to fetch current cluster nodes")
+	}
+	if len(nodes) > 0 {
+		return fmt.Errorf("inconsistent state: found leftover entries in nodes")
+	}
+	return nil
+}
diff --git a/lxd/cluster/membership_test.go b/lxd/cluster/membership_test.go
new file mode 100644
index 000000000..670aecfad
--- /dev/null
+++ b/lxd/cluster/membership_test.go
@@ -0,0 +1,163 @@
+package cluster_test
+
+import (
+	"io/ioutil"
+	"net/http"
+	"path/filepath"
+	"testing"
+
+	"github.com/CanonicalLtd/go-grpc-sql"
+	"github.com/lxc/lxd/lxd/cluster"
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/state"
+	"github.com/lxc/lxd/shared"
+	"github.com/mpvl/subtest"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestBootstrap_UnmetPreconditions(t *testing.T) {
+	cases := []struct {
+		setup func(*membershipFixtures)
+		error string
+	}{
+		{
+			func(f *membershipFixtures) {
+				f.NetworkAddress("1.2.3.4:666")
+				filename := filepath.Join(f.state.OS.VarDir, "cluster.crt")
+				ioutil.WriteFile(filename, []byte{}, 0644)
+			},
+			"inconsistent state: found leftover cluster certificate",
+		},
+		{
+			func(*membershipFixtures) {},
+			"no core.https_address config is set on this node",
+		},
+		{
+			func(f *membershipFixtures) {
+				f.NetworkAddress("1.2.3.4:666")
+				f.RaftNode("5.6.7.8:666")
+			},
+			"the node is already part of a cluster",
+		},
+		{
+			func(f *membershipFixtures) {
+				f.RaftNode("5.6.7.8:666")
+			},
+			"inconsistent state: found leftover entries in raft_nodes",
+		},
+		{
+			func(f *membershipFixtures) {
+				f.NetworkAddress("1.2.3.4:666")
+				f.ClusterNode("5.6.7.8:666")
+			},
+			"inconsistent state: found leftover entries in nodes",
+		},
+	}
+
+	for _, c := range cases {
+		subtest.Run(t, c.error, func(t *testing.T) {
+			state, cleanup := state.NewTestState(t)
+			defer cleanup()
+
+			c.setup(&membershipFixtures{t: t, state: state})
+
+			cert := shared.TestingKeyPair()
+			gateway := newGateway(t, state.Node, cert)
+			defer gateway.Shutdown()
+
+			err := cluster.Bootstrap(state, gateway, "buzz")
+			assert.EqualError(t, err, c.error)
+		})
+	}
+}
+
+func TestBootstrap(t *testing.T) {
+	state, cleanup := state.NewTestState(t)
+	defer cleanup()
+
+	cert := shared.TestingKeyPair()
+	gateway := newGateway(t, state.Node, cert)
+	defer gateway.Shutdown()
+
+	mux := http.NewServeMux()
+	server := newServer(cert, mux)
+	defer server.Close()
+
+	address := server.Listener.Addr().String()
+	f := &membershipFixtures{t: t, state: state}
+	f.NetworkAddress(address)
+
+	err := cluster.Bootstrap(state, gateway, "buzz")
+	require.NoError(t, err)
+
+	// The node-local database has now an entry in the raft_nodes table
+	err = state.Node.Transaction(func(tx *db.NodeTx) error {
+		nodes, err := tx.RaftNodes()
+		require.NoError(t, err)
+		require.Len(t, nodes, 1)
+		assert.Equal(t, int64(1), nodes[0].ID)
+		assert.Equal(t, address, nodes[0].Address)
+		return nil
+	})
+	require.NoError(t, err)
+
+	// The cluster database has now an entry in the nodes table
+	err = state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		nodes, err := tx.Nodes()
+		require.NoError(t, err)
+		require.Len(t, nodes, 1)
+		assert.Equal(t, "buzz", nodes[0].Name)
+		assert.Equal(t, address, nodes[0].Address)
+		return nil
+	})
+	require.NoError(t, err)
+
+	// The cluster certificate is in place.
+	assert.True(t, shared.PathExists(filepath.Join(state.OS.VarDir, "cluster.crt")))
+
+	// The dqlite driver is now exposed over the network.
+	for path, handler := range gateway.HandlerFuncs() {
+		mux.HandleFunc(path, handler)
+	}
+
+	driver := grpcsql.NewDriver(gateway.Dialer())
+	conn, err := driver.Open("test.db")
+	require.NoError(t, err)
+	require.NoError(t, conn.Close())
+}
+
+// Helper for setting fixtures for Bootstrap tests.
+type membershipFixtures struct {
+	t     *testing.T
+	state *state.State
+}
+
+// Set core.https_address to the given value.
+func (h *membershipFixtures) NetworkAddress(address string) {
+	err := h.state.Node.Transaction(func(tx *db.NodeTx) error {
+		config := map[string]string{
+			"core.https_address": address,
+		}
+		return tx.UpdateConfig(config)
+	})
+	require.NoError(h.t, err)
+}
+
+// Add the given address to the raft_nodes table.
+func (h *membershipFixtures) RaftNode(address string) {
+	err := h.state.Node.Transaction(func(tx *db.NodeTx) error {
+		_, err := tx.RaftNodeAdd(address)
+		return err
+	})
+	require.NoError(h.t, err)
+}
+
+// Add the given address to the nodes table of the cluster database.
+func (h *membershipFixtures) ClusterNode(address string) {
+	err := h.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		_, err := tx.NodeAdd("rusp", address)
+		return err
+	})
+	require.NoError(h.t, err)
+}

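A note on the certificate handling in this patch: Bootstrap does not mint a separate cluster keypair. Instead, cluster.crt, cluster.key and (when present) cluster.ca are created as relative symlinks pointing at the node's regular server certificate. A minimal standalone sketch of what that leaves on disk (the var dir path below is hypothetical):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	varDir := "/var/lib/lxd" // hypothetical LXD var dir

	// After a successful Bootstrap, cluster.crt is a relative symlink
	// pointing at the node's own server certificate.
	target, err := os.Readlink(filepath.Join(varDir, "cluster.crt"))
	if err != nil {
		fmt.Println("no cluster certificate:", err)
		return
	}
	fmt.Println(target) // expected: "server.crt"
}
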
From de6894538c2c5a7fa5493c606a26d73e380b2072 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Sat, 14 Oct 2017 19:35:13 +0000
Subject: [PATCH 022/227] Add BootstrapCluster REST API command

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 client/interfaces.go    |  3 +++
 client/lxd_cluster.go   | 14 ++++++++++++++
 lxd/api_1.0.go          |  1 +
 lxd/api_cluster.go      | 41 +++++++++++++++++++++++++++++++++++++++++
 lxd/api_cluster_test.go | 35 +++++++++++++++++++++++++++++++++++
 shared/api/cluster.go   |  9 +++++++++
 6 files changed, 103 insertions(+)
 create mode 100644 client/lxd_cluster.go
 create mode 100644 lxd/api_cluster.go
 create mode 100644 lxd/api_cluster_test.go
 create mode 100644 shared/api/cluster.go

diff --git a/client/interfaces.go b/client/interfaces.go
index 85d0ec207..c509ca92f 100644
--- a/client/interfaces.go
+++ b/client/interfaces.go
@@ -161,6 +161,9 @@ type ContainerServer interface {
 	DeleteStoragePoolVolume(pool string, volType string, name string) (err error)
 	RenameStoragePoolVolume(pool string, volType string, name string, volume api.StorageVolumePost) (err error)
 
+	// Cluster functions ("cluster" API extensions)
+	BootstrapCluster(name string) (op *Operation, err error)
+
 	// Internal functions (for internal use)
 	RawQuery(method string, path string, data interface{}, queryETag string) (resp *api.Response, ETag string, err error)
 	RawWebsocket(path string) (conn *websocket.Conn, err error)
diff --git a/client/lxd_cluster.go b/client/lxd_cluster.go
new file mode 100644
index 000000000..672665ccb
--- /dev/null
+++ b/client/lxd_cluster.go
@@ -0,0 +1,14 @@
+package lxd
+
+import "github.com/lxc/lxd/shared/api"
+
+// BootstrapCluster request to bootstrap a new cluster.
+func (r *ProtocolLXD) BootstrapCluster(name string) (*Operation, error) {
+	cluster := api.ClusterPost{Name: name}
+	op, _, err := r.queryOperation("POST", "/cluster", cluster, "")
+	if err != nil {
+		return nil, err
+	}
+
+	return op, nil
+}
diff --git a/lxd/api_1.0.go b/lxd/api_1.0.go
index 4c18d2459..501e21dcc 100644
--- a/lxd/api_1.0.go
+++ b/lxd/api_1.0.go
@@ -56,6 +56,7 @@ var api10 = []Command{
 	storagePoolVolumesTypeCmd,
 	storagePoolVolumeTypeCmd,
 	serverResourceCmd,
+	clusterCmd,
 }
 
 func api10Get(d *Daemon, r *http.Request) Response {
diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
new file mode 100644
index 000000000..4e8bb0419
--- /dev/null
+++ b/lxd/api_cluster.go
@@ -0,0 +1,41 @@
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+
+	"github.com/lxc/lxd/lxd/cluster"
+	"github.com/lxc/lxd/shared/api"
+)
+
+var clusterCmd = Command{name: "cluster", post: clusterPost}
+
+func clusterPost(d *Daemon, r *http.Request) Response {
+	req := api.ClusterPost{}
+
+	// Parse the request
+	err := json.NewDecoder(r.Body).Decode(&req)
+	if err != nil {
+		return BadRequest(err)
+	}
+
+	// Sanity checks
+	if req.Name == "" {
+		return BadRequest(fmt.Errorf("No name provided"))
+	}
+
+	run := func(op *operation) error {
+		return cluster.Bootstrap(d.State(), d.gateway, req.Name)
+	}
+
+	resources := map[string][]string{}
+	resources["cluster"] = []string{}
+
+	op, err := operationCreate(operationClassTask, resources, nil, run, nil, nil)
+	if err != nil {
+		return InternalError(err)
+	}
+
+	return OperationResponse(op)
+}
diff --git a/lxd/api_cluster_test.go b/lxd/api_cluster_test.go
new file mode 100644
index 000000000..aa096b9d6
--- /dev/null
+++ b/lxd/api_cluster_test.go
@@ -0,0 +1,35 @@
+package main
+
+import (
+	"fmt"
+	"testing"
+
+	lxd "github.com/lxc/lxd/client"
+	"github.com/lxc/lxd/shared"
+	"github.com/stretchr/testify/require"
+)
+
+// A LXD node which is already configured for networking can be converted to a
+// single-node LXD cluster.
+func TestCluster_Bootstrap(t *testing.T) {
+	daemon, cleanup := newDaemon(t)
+	defer cleanup()
+
+	client, err := lxd.ConnectLXDUnix(daemon.UnixSocket(), nil)
+	require.NoError(t, err)
+
+	server, _, err := client.GetServer()
+	require.NoError(t, err)
+
+	port, err := shared.AllocatePort()
+	require.NoError(t, err)
+
+	serverPut := server.Writable()
+	serverPut.Config["core.https_address"] = fmt.Sprintf("localhost:%d", port)
+
+	require.NoError(t, client.UpdateServer(serverPut, ""))
+
+	op, err := client.BootstrapCluster("buzz")
+	require.NoError(t, err)
+	require.NoError(t, op.Wait())
+}
diff --git a/shared/api/cluster.go b/shared/api/cluster.go
new file mode 100644
index 000000000..847264214
--- /dev/null
+++ b/shared/api/cluster.go
@@ -0,0 +1,9 @@
+package api
+
+// ClusterPost represents the fields required to bootstrap or join a LXD
+// cluster.
+//
+// API extension: cluster
+type ClusterPost struct {
+	Name string `json:"name" yaml:"name"`
+}

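Since bootstrapping runs as a background task, callers are expected to wait on the returned operation, as TestCluster_Bootstrap does above. A minimal sketch of driving the new endpoint from client code (an empty socket path should fall back to the client's default Unix socket; that default is an assumption here):

package main

import (
	"log"

	lxd "github.com/lxc/lxd/client"
)

func main() {
	// Connect to the local daemon over its Unix socket.
	c, err := lxd.ConnectLXDUnix("", nil)
	if err != nil {
		log.Fatal(err)
	}

	// POST /cluster and block until the bootstrap task completes.
	op, err := c.BootstrapCluster("node1")
	if err != nil {
		log.Fatal(err)
	}
	if err := op.Wait(); err != nil {
		log.Fatal(err)
	}
}
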
From d35563a4be1bddc1a61f57161cec365a80ba6dea Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Sat, 14 Oct 2017 22:17:56 +0000
Subject: [PATCH 023/227] Add support for bootstrapping a cluster in lxd init

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/main_init.go                | 105 +++++++++++++++++++++++++++++++++++++++-
 lxd/main_init_test.go           |  34 ++++++++++++-
 lxd/util/net.go                 |  29 +++++++++++
 test/suites/init_interactive.sh |   1 +
 4 files changed, 167 insertions(+), 2 deletions(-)

diff --git a/lxd/main_init.go b/lxd/main_init.go
index f556599bb..43cf13bed 100644
--- a/lxd/main_init.go
+++ b/lxd/main_init.go
@@ -3,6 +3,7 @@ package main
 import (
 	"fmt"
 	"net"
+	"os"
 	"os/exec"
 	"strconv"
 	"strings"
@@ -131,12 +132,29 @@ func (cmd *CmdInit) fillDataAuto(data *cmdInitData, client lxd.ContainerServer,
 // Fill the given configuration data with parameters collected with
 // interactive questions.
 func (cmd *CmdInit) fillDataInteractive(data *cmdInitData, client lxd.ContainerServer, backendsAvailable []string, existingPools []string) error {
+	clustering, err := cmd.askClustering()
+	if err != nil {
+		return err
+	}
 	storage, err := cmd.askStorage(client, existingPools, backendsAvailable)
 	if err != nil {
 		return err
 	}
 	defaultPrivileged := cmd.askDefaultPrivileged()
-	networking := cmd.askNetworking()
+
+	// Ask about networking only if we skipped the clustering questions.
+	var networking *cmdInitNetworkingParams
+	if clustering == nil {
+		networking = cmd.askNetworking()
+	} else {
+		// Re-use the answers to the clustering questions.
+		networking = &cmdInitNetworkingParams{
+			Address:       clustering.Address,
+			Port:          clustering.Port,
+			TrustPassword: clustering.TrustPassword,
+		}
+	}
+
 	imagesAutoUpdate := cmd.askImages()
 	bridge := cmd.askBridge(client)
 
@@ -145,6 +163,8 @@ func (cmd *CmdInit) fillDataInteractive(data *cmdInitData, client lxd.ContainerS
 		return fmt.Errorf("LXD managed bridges require \"dnsmasq\". Install it and try again.")
 	}
 
+	cmd.fillDataWithClustering(data, clustering)
+
 	err = cmd.fillDataWithStorage(data, storage, existingPools)
 	if err != nil {
 		return err
@@ -198,6 +218,15 @@ func (cmd *CmdInit) fillDataWithCurrentDefaultProfile(data *cmdInitData, client
 	}
 }
 
+// Fill the given init data with clustering details matching the given
+// clustering parameters.
+func (cmd *CmdInit) fillDataWithClustering(data *cmdInitData, clustering *cmdInitClusteringParams) {
+	if clustering == nil {
+		return
+	}
+	data.Cluster.Name = clustering.Name
+}
+
 // Fill the given init data with a new storage pool structure matching the
 // given storage parameters.
 func (cmd *CmdInit) fillDataWithStorage(data *cmdInitData, storage *cmdInitStorageParams, existingPools []string) error {
@@ -382,6 +411,13 @@ func (cmd *CmdInit) apply(client lxd.ContainerServer, data *cmdInitData) error {
 		return cmd.initConfig(client, data.Config)
 	})
 
+	// Cluster changers
+	if data.Cluster.Name != "" {
+		changers = append(changers, func() (reverter, error) {
+			return cmd.initCluster(client, data.Cluster)
+		})
+	}
+
 	// Storage pool changers
 	for i := range data.Pools {
 		pool := data.Pools[i] // Local variable for the closure
@@ -465,6 +501,17 @@ func (cmd *CmdInit) initConfig(client lxd.ContainerServer, config map[string]int
 	return reverter, nil
 }
 
+// Turn on clustering.
+func (cmd *CmdInit) initCluster(client lxd.ContainerServer, cluster api.ClusterPost) (reverter, error) {
+	var reverter func() error
+	op, err := client.BootstrapCluster(cluster.Name)
+	if err != nil {
+		return nil, err
+	}
+	op.Wait()
+	return reverter, nil
+}
+
 // Create or update a single pool, and return a revert function in case of success.
 func (cmd *CmdInit) initPool(client lxd.ContainerServer, pool api.StoragePoolsPost) (reverter, error) {
 	var reverter func() error
@@ -669,6 +716,52 @@ func (cmd *CmdInit) profileDeviceAlreadyExists(profile *api.ProfilesPost, device
 	return nil
 }
 
+// Ask if the user wants to enable clustering
+func (cmd *CmdInit) askClustering() (*cmdInitClusteringParams, error) {
+	askWants := "Would you like to use LXD clustering? (yes/no) [default=no]: "
+	if !cmd.Context.AskBool(askWants, "no") {
+		return nil, nil
+	}
+
+	params := &cmdInitClusteringParams{}
+
+	// Node name
+	hostname, err := os.Hostname()
+	if err != nil {
+		hostname = "lxd"
+	}
+	askName := fmt.Sprintf(
+		"What name should be used to identify this node in the cluster? [default=%s]: ",
+		hostname)
+	params.Name = cmd.Context.AskString(askName, hostname, nil)
+
+	// Network address
+	address := util.NetworkInterfaceAddress()
+	askAddress := fmt.Sprintf(
+		"What IP address or DNS name should be used to reach this node? [default=%s]: ",
+		address)
+	address = util.CanonicalNetworkAddress(cmd.Context.AskString(askAddress, address, nil))
+	host, port, err := net.SplitHostPort(address)
+	if err != nil {
+		return nil, err
+	}
+	portN, err := strconv.Atoi(port)
+	if err != nil {
+		return nil, err
+	}
+	params.Address = host
+	params.Port = int64(portN)
+
+	// Join existing cluster
+	if !cmd.Context.AskBool("Are you joining an existing cluster? (yes/no) [default=no]: ", "no") {
+		params.TrustPassword = cmd.Context.AskPassword(
+			"Trust password for new clients: ", cmd.PasswordReader)
+		return params, nil
+	}
+
+	return nil, fmt.Errorf("joining cluster not yet implemented")
+}
+
 // Ask if the user wants to create a new storage pool, and return
 // the relevant parameters if so.
 func (cmd *CmdInit) askStorage(client lxd.ContainerServer, existingPools []string, availableBackends []string) (*cmdInitStorageParams, error) {
@@ -939,6 +1032,16 @@ type cmdInitData struct {
 	Pools         []api.StoragePoolsPost `yaml:"storage_pools"`
 	Networks      []api.NetworksPost
 	Profiles      []api.ProfilesPost
+	Cluster       api.ClusterPost
+}
+
+// Parameters needed when enabling clustering in interactive mode.
+type cmdInitClusteringParams struct {
+	Name          string // Name of the new node
+	Address       string // Network address of the new node
+	Port          int64  // Network port of the new node
+	Join          string // Network address of existing node to join.
+	TrustPassword string // Trust password
 }
 
 // Parameters needed when creating a storage pool in interactive or auto
diff --git a/lxd/main_init_test.go b/lxd/main_init_test.go
index 378927efd..36829b5d4 100644
--- a/lxd/main_init_test.go
+++ b/lxd/main_init_test.go
@@ -115,6 +115,26 @@ func (suite *cmdInitTestSuite) TestCmdInit_InteractiveHTTPSAddressAndTrustPasswo
 	suite.Req.Nil(util.PasswordCheck(secret, "sekret"))
 }
 
+// Enable clustering interactively.
+func (suite *cmdInitTestSuite) TestCmdInit_InteractiveClustering() {
+	suite.command.PasswordReader = func(int) ([]byte, error) {
+		return []byte("sekret"), nil
+	}
+	port, err := shared.AllocatePort()
+	suite.Req.Nil(err)
+	answers := &cmdInitAnswers{
+		WantClustering: true,
+		ClusterName:    "buzz",
+		ClusterAddress: fmt.Sprintf("127.0.0.1:%d", port),
+	}
+	answers.Render(suite.streams)
+
+	suite.Req.Nil(suite.command.Run())
+	state := suite.d.State()
+	certfile := filepath.Join(state.OS.VarDir, "cluster.crt")
+	suite.Req.True(shared.PathExists(certfile))
+}
+
 // Pass network address and trust password via command line arguments.
 func (suite *cmdInitTestSuite) TestCmdInit_AutoHTTPSAddressAndTrustPassword() {
 	port, err := shared.AllocatePort()
@@ -631,6 +651,10 @@ func (suite *cmdInitTestSuite) TestCmdInit_ProfilesPreseedUpdate() {
 // Convenience for building the input text a user would enter for a certain
 // sequence of answers.
 type cmdInitAnswers struct {
+	WantClustering           bool
+	WantJoinCluster          bool
+	ClusterName              string
+	ClusterAddress           string
 	WantStoragePool          bool
 	WantAvailableOverNetwork bool
 	BindToAddress            string
@@ -645,8 +669,16 @@ type cmdInitAnswers struct {
 // Render the input text the user would type for the desired answers, populating
 // the stdin of the given streams.
 func (answers *cmdInitAnswers) Render(streams *cmd.MemoryStreams) {
+	streams.InputAppendBoolAnswer(answers.WantClustering)
+	if answers.WantClustering {
+		streams.InputAppendLine(answers.ClusterName)
+		streams.InputAppendLine(answers.ClusterAddress)
+		streams.InputAppendBoolAnswer(answers.WantJoinCluster)
+	}
 	streams.InputAppendBoolAnswer(answers.WantStoragePool)
-	streams.InputAppendBoolAnswer(answers.WantAvailableOverNetwork)
+	if !answers.WantClustering {
+		streams.InputAppendBoolAnswer(answers.WantAvailableOverNetwork)
+	}
 	if answers.WantAvailableOverNetwork {
 		streams.InputAppendLine(answers.BindToAddress)
 		streams.InputAppendLine(answers.BindToPort)
diff --git a/lxd/util/net.go b/lxd/util/net.go
index aa94af2a1..8f874d118 100644
--- a/lxd/util/net.go
+++ b/lxd/util/net.go
@@ -103,3 +103,32 @@ func ServerTLSConfig(cert *shared.CertInfo) *tls.Config {
 	config.BuildNameToCertificate()
 	return config
 }
+
+// NetworkInterfaceAddress returns the first non-loopback address of any of the
+// system network interfaces.
+//
+// Return the empty string if none is found.
+func NetworkInterfaceAddress() string {
+	ifaces, err := net.Interfaces()
+	if err != nil {
+		return ""
+	}
+	for _, iface := range ifaces {
+		if shared.IsLoopback(&iface) {
+			continue
+		}
+		addrs, err := iface.Addrs()
+		if err != nil {
+			continue
+		}
+		if len(addrs) == 0 {
+			continue
+		}
+		addr, ok := addrs[0].(*net.IPNet)
+		if !ok {
+			continue
+		}
+		return addr.IP.String()
+	}
+	return ""
+}
diff --git a/test/suites/init_interactive.sh b/test/suites/init_interactive.sh
index 0e70663b6..ab7ec40db 100644
--- a/test/suites/init_interactive.sh
+++ b/test/suites/init_interactive.sh
@@ -16,6 +16,7 @@ test_init_interactive() {
     fi
 
     cat <<EOF | lxd init
+no
 yes
 my-storage-pool
 dir

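One detail worth noting in askClustering above: the answer is first canonicalized (so it is expected to always be in host:port form once CanonicalNetworkAddress has run) and then split into host and numeric port with the standard library. A self-contained sketch of that parsing step (the address is hypothetical):

package main

import (
	"fmt"
	"log"
	"net"
	"strconv"
)

func main() {
	address := "10.0.0.1:8443" // hypothetical canonicalized answer

	// Split into host and port, then convert the port to an integer,
	// exactly as askClustering does.
	host, port, err := net.SplitHostPort(address)
	if err != nil {
		log.Fatal(err)
	}
	portN, err := strconv.Atoi(port)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(host, int64(portN)) // 10.0.0.1 8443
}
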
From 77cf3a21b4a467ff18f9bd35205701b8a0354c08 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 16 Oct 2017 09:23:03 +0000
Subject: [PATCH 024/227] Add cluster.Accept to accept a new cluster node

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/membership.go      | 119 ++++++++++++++++++++++++++++++++++++++---
 lxd/cluster/membership_test.go | 113 ++++++++++++++++++++++++++++++++++++++
 shared/cert.go                 |  10 ++++
 3 files changed, 236 insertions(+), 6 deletions(-)

diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index 89e847e5a..f137dd26d 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -6,6 +6,7 @@ import (
 	"path/filepath"
 
 	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/db/cluster"
 	"github.com/lxc/lxd/lxd/node"
 	"github.com/lxc/lxd/lxd/state"
 	"github.com/lxc/lxd/shared"
@@ -23,15 +24,13 @@ func Bootstrap(state *state.State, gateway *Gateway, name string) error {
 		return fmt.Errorf("node name must not be empty")
 	}
 
-	// Sanity check that there's no leftover cluster certificate
-	for _, basename := range []string{"cluster.crt", "cluster.key", "cluster.ca"} {
-		if shared.PathExists(filepath.Join(state.OS.VarDir, basename)) {
-			return fmt.Errorf("inconsistent state: found leftover cluster certificate")
-		}
+	err := membershipCheckNoLeftoverClusterCert(state.OS.VarDir)
+	if err != nil {
+		return err
 	}
 
 	var address string
-	err := state.Node.Transaction(func(tx *db.NodeTx) error {
+	err = state.Node.Transaction(func(tx *db.NodeTx) error {
 		// Fetch current network address and raft nodes
 		config, err := node.ConfigLoad(tx)
 		if err != nil {
@@ -124,6 +123,66 @@ func Bootstrap(state *state.State, gateway *Gateway, name string) error {
 	return nil
 }
 
+// Accept a new node and add it to the cluster.
+//
+// This instance must already be clustered.
+//
+// Return an updated list of raft database nodes (possibly including the newly
+// accepted node).
+func Accept(state *state.State, name, address string, schema, api int) ([]db.RaftNode, error) {
+	// Check parameters
+	if name == "" {
+		return nil, fmt.Errorf("node name must not be empty")
+	}
+	if address == "" {
+		return nil, fmt.Errorf("node address must not be empty")
+	}
+
+	// Insert the new node into the nodes table.
+	err := state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		// Check that the node can be accepted with these parameters.
+		err := membershipCheckClusterStateForAccept(tx, name, address, schema, api)
+		if err != nil {
+			return err
+		}
+		// Add the new node
+		_, err = tx.NodeAdd(name, address)
+		if err != nil {
+			return errors.Wrap(err, "failed to insert first raft node")
+		}
+
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// Possibly insert the new node into the raft_nodes table (if we have
+	// less than 3 database nodes).
+	var nodes []db.RaftNode
+	err = state.Node.Transaction(func(tx *db.NodeTx) error {
+		var err error
+		nodes, err = tx.RaftNodes()
+		if err != nil {
+			return errors.Wrap(err, "failed to fetch current raft nodes")
+		}
+		if len(nodes) >= membershipMaxRaftNodes {
+			return nil
+		}
+		id, err := tx.RaftNodeAdd(address)
+		if err != nil {
+			return err
+		}
+		nodes = append(nodes, db.RaftNode{ID: id, Address: address})
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return nodes, nil
+}
+
 // Check that node-related preconditions are met for bootstrapping or joining a
 // cluster.
 func membershipCheckNodeStateForBootstrapOrJoin(tx *db.NodeTx, address string) error {
@@ -164,3 +223,51 @@ func membershipCheckClusterStateForBootstrapOrJoin(tx *db.ClusterTx) error {
 	}
 	return nil
 }
+
+// Check that cluster-related preconditions are met for accepting a new node.
+func membershipCheckClusterStateForAccept(tx *db.ClusterTx, name string, address string, schema int, api int) error {
+	nodes, err := tx.Nodes()
+	if err != nil {
+		return errors.Wrap(err, "failed to fetch current cluster nodes")
+	}
+	if len(nodes) == 0 {
+		return fmt.Errorf("clustering not enabled")
+	}
+
+	for _, node := range nodes {
+		if node.Name == name {
+			return fmt.Errorf("cluster already has node with name %s", name)
+		}
+		if node.Address == address {
+			return fmt.Errorf("cluster already has node with address %s", address)
+		}
+		if node.Schema != schema {
+			return fmt.Errorf("schema version mismatch: cluster has %d", node.Schema)
+		}
+		if node.APIExtensions != api {
+			return fmt.Errorf("API version mismatch: cluster has %d", node.APIExtensions)
+		}
+	}
+
+	return nil
+}
+
+// Check that there is no left-over cluster certificate in the LXD var dir of
+// this node.
+func membershipCheckNoLeftoverClusterCert(dir string) error {
+	// Sanity check that there's no leftover cluster certificate
+	for _, basename := range []string{"cluster.crt", "cluster.key", "cluster.ca"} {
+		if shared.PathExists(filepath.Join(dir, basename)) {
+			return fmt.Errorf("inconsistent state: found leftover cluster certificate")
+		}
+	}
+	return nil
+}
+
+// SchemaVersion holds the version of the cluster database schema.
+var SchemaVersion = cluster.SchemaVersion
+
+// We currently aim at having 3 nodes as part of the raft dqlite cluster.
+//
+// TODO: this number should probably be configurable.
+const membershipMaxRaftNodes = 3
diff --git a/lxd/cluster/membership_test.go b/lxd/cluster/membership_test.go
index 670aecfad..f6e9fe61d 100644
--- a/lxd/cluster/membership_test.go
+++ b/lxd/cluster/membership_test.go
@@ -1,6 +1,7 @@
 package cluster_test
 
 import (
+	"fmt"
 	"io/ioutil"
 	"net/http"
 	"path/filepath"
@@ -11,6 +12,7 @@ import (
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/state"
 	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/version"
 	"github.com/mpvl/subtest"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -127,6 +129,117 @@ func TestBootstrap(t *testing.T) {
 	require.NoError(t, conn.Close())
 }
 
+// If pre-conditions are not met, a descriptive error is returned.
+func TestAccept_UnmetPreconditions(t *testing.T) {
+	cases := []struct {
+		name    string
+		address string
+		schema  int
+		api     int
+		setup   func(*membershipFixtures)
+		error   string
+	}{
+		{
+			"buzz",
+			"1.2.3.4:666",
+			cluster.SchemaVersion,
+			len(version.APIExtensions),
+			func(f *membershipFixtures) {},
+			"clustering not enabled",
+		},
+		{
+			"rusp",
+			"1.2.3.4:666",
+			cluster.SchemaVersion,
+			len(version.APIExtensions),
+			func(f *membershipFixtures) {
+				f.ClusterNode("5.6.7.8:666")
+			},
+			"cluster already has node with name rusp",
+		},
+		{
+			"buzz",
+			"5.6.7.8:666",
+			cluster.SchemaVersion,
+			len(version.APIExtensions),
+			func(f *membershipFixtures) {
+				f.ClusterNode("5.6.7.8:666")
+			},
+			"cluster already has node with address 5.6.7.8:666",
+		},
+		{
+			"buzz",
+			"1.2.3.4:666",
+			cluster.SchemaVersion - 1,
+			len(version.APIExtensions),
+			func(f *membershipFixtures) {
+				f.ClusterNode("5.6.7.8:666")
+			},
+			fmt.Sprintf("schema version mismatch: cluster has %d", cluster.SchemaVersion),
+		},
+		{
+			"buzz",
+			"1.2.3.4:666",
+			cluster.SchemaVersion,
+			len(version.APIExtensions) - 1,
+			func(f *membershipFixtures) {
+				f.ClusterNode("5.6.7.8:666")
+			},
+			fmt.Sprintf("API version mismatch: cluster has %d", len(version.APIExtensions)),
+		},
+	}
+	for _, c := range cases {
+		subtest.Run(t, c.error, func(t *testing.T) {
+			state, cleanup := state.NewTestState(t)
+			defer cleanup()
+
+			c.setup(&membershipFixtures{t: t, state: state})
+
+			_, err := cluster.Accept(state, c.name, c.address, c.schema, c.api)
+			assert.EqualError(t, err, c.error)
+		})
+	}
+}
+
+// When a node gets accepted, it gets included in the raft nodes.
+func TestAccept(t *testing.T) {
+	state, cleanup := state.NewTestState(t)
+	defer cleanup()
+
+	f := &membershipFixtures{t: t, state: state}
+	f.RaftNode("1.2.3.4:666")
+	f.ClusterNode("1.2.3.4:666")
+
+	nodes, err := cluster.Accept(
+		state, "buzz", "5.6.7.8:666", cluster.SchemaVersion, len(version.APIExtensions))
+	assert.NoError(t, err)
+	assert.Len(t, nodes, 2)
+	assert.Equal(t, int64(1), nodes[0].ID)
+	assert.Equal(t, int64(2), nodes[1].ID)
+	assert.Equal(t, "1.2.3.4:666", nodes[0].Address)
+	assert.Equal(t, "5.6.7.8:666", nodes[1].Address)
+}
+
+// If the cluster has already reached its maximum number of raft nodes, the
+// joining node is not included in the returned raft nodes list.
+func TestAccept_MaxRaftNodes(t *testing.T) {
+	state, cleanup := state.NewTestState(t)
+	defer cleanup()
+
+	f := &membershipFixtures{t: t, state: state}
+	f.RaftNode("1.1.1.1:666")
+	f.RaftNode("2.2.2.2:666")
+	f.RaftNode("3.3.3.3:666")
+	f.ClusterNode("1.2.3.4:666")
+
+	nodes, err := cluster.Accept(
+		state, "buzz", "4.5.6.7:666", cluster.SchemaVersion, len(version.APIExtensions))
+	assert.NoError(t, err)
+	for _, node := range nodes {
+		assert.NotEqual(t, "4.5.6.7:666", node.Address)
+	}
+}
+
 // Helper for setting fixtures for Bootstrap tests.
 type membershipFixtures struct {
 	t     *testing.T
diff --git a/shared/cert.go b/shared/cert.go
index eee7c91a7..4f6f23af4 100644
--- a/shared/cert.go
+++ b/shared/cert.go
@@ -101,6 +101,16 @@ func (c *CertInfo) PublicKey() []byte {
 	return pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: data})
 }
 
+// PrivateKey is a convenience to encode the underlying private key.
+func (c *CertInfo) PrivateKey() []byte {
+	key, ok := c.KeyPair().PrivateKey.(*rsa.PrivateKey)
+	if !ok {
+		return nil
+	}
+	data := x509.MarshalPKCS1PrivateKey(key)
+	return pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: data})
+}
+
 // CertKind defines the kind of certificate to generate from scratch in
 // KeyPairAndCA when it's not there.
 //

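The new CertInfo.PrivateKey helper only handles RSA keys and returns nil for anything else. A standalone sketch of the same PKCS#1/PEM encoding on a throwaway key, using only the standard library:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"fmt"
)

func main() {
	// Generate a throwaway RSA key and encode it the way
	// CertInfo.PrivateKey does.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	der := x509.MarshalPKCS1PrivateKey(key)
	pemBytes := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: der})
	fmt.Printf("%s\n", pemBytes[:31]) // -----BEGIN RSA PRIVATE KEY-----
}
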
From fad16deae171721822a955e235988e446fbfb751 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 16 Oct 2017 19:53:02 +0000
Subject: [PATCH 025/227] Add cluster.Join

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/membership.go      | 102 +++++++++++++++++++++++++++++++++++++++++
 lxd/cluster/membership_test.go |  54 ++++++++++++++++++++++
 2 files changed, 156 insertions(+)

diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index f137dd26d..e923f0e05 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -4,12 +4,17 @@ import (
 	"fmt"
 	"os"
 	"path/filepath"
+	"strconv"
+	"time"
 
+	"github.com/hashicorp/raft"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/db/cluster"
 	"github.com/lxc/lxd/lxd/node"
 	"github.com/lxc/lxd/lxd/state"
 	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/log15"
+	"github.com/lxc/lxd/shared/logger"
 	"github.com/pkg/errors"
 )
 
@@ -183,6 +188,103 @@ func Accept(state *state.State, name, address string, schema, api int) ([]db.Raf
 	return nodes, nil
 }
 
+// Join makes a non-clustered LXD node join an existing cluster.
+//
+// It's assumed that Accept() was previously called against the target node,
+// which handed back the updated list of raft nodes.
+//
+// The cert parameter must contain the keypair/CA material of the cluster being
+// joined.
+func Join(state *state.State, gateway *Gateway, cert *shared.CertInfo, name string, nodes []db.RaftNode) error {
+	// Check parameters
+	if name == "" {
+		return fmt.Errorf("node name must not be empty")
+	}
+
+	var address string
+	err := state.Node.Transaction(func(tx *db.NodeTx) error {
+		// Fetch current network address and raft nodes
+		config, err := node.ConfigLoad(tx)
+		if err != nil {
+			return errors.Wrap(err, "failed to fetch node configuration")
+		}
+		address = config.HTTPSAddress()
+
+		// Make sure node-local database state is in order.
+		err = membershipCheckNodeStateForBootstrapOrJoin(tx, address)
+		if err != nil {
+			return err
+		}
+
+		// Set the raft nodes list to the one that was returned by Accept().
+		err = tx.RaftNodesReplace(nodes)
+		if err != nil {
+			return errors.Wrap(err, "failed to set raft nodes")
+		}
+
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
+	// Shutdown the gateway and wipe any raft data. This will trash any
+	// gRPC SQL connection against our in-memory dqlite driver and shutdown
+	// the associated raft instance.
+	err = gateway.Shutdown()
+	if err != nil {
+		return errors.Wrap(err, "failed to shutdown gRPC SQL gateway")
+	}
+	err = os.RemoveAll(filepath.Join(state.OS.VarDir, "raft"))
+	if err != nil {
+		return errors.Wrap(err, "failed to remove existing raft data")
+	}
+
+	// Re-initialize the gateway. This will create a new raft factory and
+	// dqlite driver instance, which will be exposed over gRPC by the
+	// gateway handlers.
+	gateway.cert = cert
+	err = gateway.init()
+	if err != nil {
+		return errors.Wrap(err, "failed to re-initialize gRPC SQL gateway")
+	}
+
+	// If we are listed among the database nodes, join the raft cluster.
+	id := ""
+	target := ""
+	for _, node := range nodes {
+		if node.Address == address {
+			id = strconv.Itoa(int(node.ID))
+		} else {
+			target = node.Address
+		}
+	}
+	if id != "" {
+		logger.Info(
+			"Joining dqlite raft cluster",
+			log15.Ctx{"id": id, "address": address, "target": target})
+		changer := gateway.raft.MembershipChanger()
+		err := changer.Join(raft.ServerID(id), raft.ServerAddress(target), 5*time.Second)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Make sure we can actually connect to the cluster database through
+	// the network endpoint. This also makes the Go SQL pooling system
+	// invalidate the old connection, so new queries will be executed over
+	// the new gRPC network connection.
+	err = state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		_, err := tx.Nodes()
+		return err
+	})
+	if err != nil {
+		return errors.Wrap(err, "cluster database initialization failed")
+	}
+
+	return nil
+}
+
 // Check that node-related preconditions are met for bootstrapping or joining a
 // cluster.
 func membershipCheckNodeStateForBootstrapOrJoin(tx *db.NodeTx, address string) error {
diff --git a/lxd/cluster/membership_test.go b/lxd/cluster/membership_test.go
index f6e9fe61d..70e3ad224 100644
--- a/lxd/cluster/membership_test.go
+++ b/lxd/cluster/membership_test.go
@@ -240,6 +240,60 @@ func TestAccept_MaxRaftNodes(t *testing.T) {
 	}
 }
 
+func TestJoin(t *testing.T) {
+	// Setup a target node running as leader of a cluster.
+	targetState, cleanup := state.NewTestState(t)
+	defer cleanup()
+
+	targetCert := shared.TestingKeyPair()
+	targetGateway := newGateway(t, targetState.Node, targetCert)
+	defer targetGateway.Shutdown()
+
+	targetMux := http.NewServeMux()
+	targetServer := newServer(targetCert, targetMux)
+	defer targetServer.Close()
+
+	for path, handler := range targetGateway.HandlerFuncs() {
+		targetMux.HandleFunc(path, handler)
+	}
+
+	targetAddress := targetServer.Listener.Addr().String()
+	targetF := &membershipFixtures{t: t, state: targetState}
+	targetF.NetworkAddress(targetAddress)
+
+	err := cluster.Bootstrap(targetState, targetGateway, "buzz")
+	require.NoError(t, err)
+
+	// Setup a joining node
+	state, cleanup := state.NewTestState(t)
+	defer cleanup()
+
+	cert := shared.TestingAltKeyPair()
+	gateway := newGateway(t, state.Node, cert)
+	defer gateway.Shutdown()
+
+	mux := http.NewServeMux()
+	server := newServer(cert, mux)
+	defer server.Close()
+
+	for path, handler := range gateway.HandlerFuncs() {
+		mux.HandleFunc(path, handler)
+	}
+
+	address := server.Listener.Addr().String()
+	f := &membershipFixtures{t: t, state: state}
+	f.NetworkAddress(address)
+
+	// Accept the joining node.
+	nodes, err := cluster.Accept(
+		targetState, "rusp", address, cluster.SchemaVersion, len(version.APIExtensions))
+	require.NoError(t, err)
+
+	// Actually join the cluster.
+	err = cluster.Join(state, gateway, targetCert, "rusp", nodes)
+	require.NoError(t, err)
+}
+
 // Helper for setting fixtures for Bootstrap tests.
 type membershipFixtures struct {
 	t     *testing.T

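The raft membership decision in Join deserves spelling out: a node only joins the raft cluster if its own address shows up in the list returned by Accept, and it joins through the address of any other database node. A self-contained sketch of that selection loop (addresses are hypothetical; RaftNode mirrors db.RaftNode):

package main

import (
	"fmt"
	"strconv"
)

// RaftNode mirrors db.RaftNode for the purposes of this sketch.
type RaftNode struct {
	ID      int64
	Address string
}

func main() {
	address := "10.0.0.2:8443" // hypothetical local node address
	nodes := []RaftNode{
		{ID: 1, Address: "10.0.0.1:8443"},
		{ID: 2, Address: "10.0.0.2:8443"},
	}

	// Our own address yields the raft server ID to join with; any other
	// entry becomes the target to send the join request to.
	id, target := "", ""
	for _, node := range nodes {
		if node.Address == address {
			id = strconv.Itoa(int(node.ID))
		} else {
			target = node.Address
		}
	}
	fmt.Println(id, target) // 2 10.0.0.1:8443

	// An empty id would mean this node is not a database node and can
	// skip the raft join entirely.
}
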
From 0b5c345467cbdad00d931f05caf8f42d31ff2d44 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 16 Oct 2017 19:56:02 +0000
Subject: [PATCH 026/227] Add LXC client AcceptNode() and JoinCluster()

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 client/interfaces.go     |   2 +
 client/lxd_cluster.go    |  37 +++++++++++++++-
 lxd/api_cluster.go       | 113 ++++++++++++++++++++++++++++++++++++++++++++++-
 lxd/endpoints/network.go |   9 ++++
 lxd/main_init.go         |   5 ++-
 lxd/util/encryption.go   |  24 ++++++++++
 shared/api/cluster.go    |  21 ++++++++-
 7 files changed, 207 insertions(+), 4 deletions(-)

diff --git a/client/interfaces.go b/client/interfaces.go
index c509ca92f..2bb25a009 100644
--- a/client/interfaces.go
+++ b/client/interfaces.go
@@ -163,6 +163,8 @@ type ContainerServer interface {
 
 	// Cluster functions ("cluster" API extensions)
 	BootstrapCluster(name string) (op *Operation, err error)
+	AcceptNode(targetPassword, name, address string, schema, api int) (info *api.ClusterNodeAccepted, err error)
+	JoinCluster(targetAddress, targetPassword string, targetCert []byte, name string) (op *Operation, err error)
 
 	// Internal functions (for internal use)
 	RawQuery(method string, path string, data interface{}, queryETag string) (resp *api.Response, ETag string, err error)
diff --git a/client/lxd_cluster.go b/client/lxd_cluster.go
index 672665ccb..4c8de7bad 100644
--- a/client/lxd_cluster.go
+++ b/client/lxd_cluster.go
@@ -2,7 +2,7 @@ package lxd
 
 import "github.com/lxc/lxd/shared/api"
 
-// BootstrapCluster request to bootstrap a new cluster.
+// BootstrapCluster requests to bootstrap a new cluster.
 func (r *ProtocolLXD) BootstrapCluster(name string) (*Operation, error) {
 	cluster := api.ClusterPost{Name: name}
 	op, _, err := r.queryOperation("POST", "/cluster", cluster, "")
@@ -12,3 +12,38 @@ func (r *ProtocolLXD) BootstrapCluster(name string) (*Operation, error) {
 
 	return op, nil
 }
+
+// AcceptNode requests to accept a new node into the cluster.
+func (r *ProtocolLXD) AcceptNode(targetPassword, name, address string, schema, apiExt int) (*api.ClusterNodeAccepted, error) {
+	cluster := api.ClusterPost{
+		Name:           name,
+		Address:        address,
+		Schema:         schema,
+		API:            apiExt,
+		TargetPassword: targetPassword,
+	}
+	info := &api.ClusterNodeAccepted{}
+	_, err := r.queryStruct("POST", "/cluster", cluster, "", &info)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return info, nil
+}
+
+// JoinCluster requests to join an existing cluster.
+func (r *ProtocolLXD) JoinCluster(targetAddress, targetPassword string, targetCert []byte, name string) (*Operation, error) {
+	cluster := api.ClusterPost{
+		TargetAddress:  targetAddress,
+		TargetPassword: targetPassword,
+		TargetCert:     targetCert,
+		Name:           name,
+	}
+	op, _, err := r.queryOperation("POST", "/cluster", cluster, "")
+	if err != nil {
+		return nil, err
+	}
+
+	return op, nil
+}
diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index 4e8bb0419..6cfcd4606 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -5,11 +5,16 @@ import (
 	"fmt"
 	"net/http"
 
+	lxd "github.com/lxc/lxd/client"
 	"github.com/lxc/lxd/lxd/cluster"
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared/api"
+	"github.com/lxc/lxd/shared/version"
+	"github.com/pkg/errors"
 )
 
-var clusterCmd = Command{name: "cluster", post: clusterPost}
+var clusterCmd = Command{name: "cluster", untrustedPost: true, post: clusterPost}
 
 func clusterPost(d *Daemon, r *http.Request) Response {
 	req := api.ClusterPost{}
@@ -25,10 +30,116 @@ func clusterPost(d *Daemon, r *http.Request) Response {
 		return BadRequest(fmt.Errorf("No name provided"))
 	}
 
+	// Depending on the provided parameters we either bootstrap a brand new
+	// cluster with this node as first node, or accept a node into our
+	// cluster, or perform a request to join a given cluster.
+	trusted := d.checkTrustedClient(r) == nil
+	if req.Address == "" && req.TargetAddress == "" {
+		// Bootstrapping a node requires the client to be trusted.
+		if !trusted {
+			return Forbidden
+		}
+		return clusterPostBootstrap(d, req)
+	} else if req.TargetAddress == "" {
+		return clusterPostAccept(d, req)
+	} else {
+		// Joining an existing cluster requires the client to be
+		// trusted.
+		if !trusted {
+			return Forbidden
+		}
+		return clusterPostJoin(d, req)
+	}
+}
+
+func clusterPostBootstrap(d *Daemon, req api.ClusterPost) Response {
 	run := func(op *operation) error {
 		return cluster.Bootstrap(d.State(), d.gateway, req.Name)
 	}
+	resources := map[string][]string{}
+	resources["cluster"] = []string{}
+
+	op, err := operationCreate(operationClassTask, resources, nil, run, nil, nil)
+	if err != nil {
+		return InternalError(err)
+	}
+
+	return OperationResponse(op)
+}
+
+func clusterPostAccept(d *Daemon, req api.ClusterPost) Response {
+	// Accepting a node requires the client to provide the correct
+	// trust password.
+	secret := daemonConfig["core.trust_password"].Get()
+	if util.PasswordCheck(secret, req.TargetPassword) != nil {
+		return Forbidden
+	}
+	nodes, err := cluster.Accept(d.State(), req.Name, req.Address, req.Schema, req.API)
+	if err != nil {
+		return BadRequest(err)
+	}
+	accepted := api.ClusterNodeAccepted{
+		RaftNodes:  make([]api.RaftNode, len(nodes)),
+		PrivateKey: d.endpoints.NetworkPrivateKey(),
+	}
+	for i, node := range nodes {
+		accepted.RaftNodes[i].ID = node.ID
+		accepted.RaftNodes[i].Address = node.Address
+	}
+	return SyncResponse(true, accepted)
+}
 
+func clusterPostJoin(d *Daemon, req api.ClusterPost) Response {
+	// Make sure basic pre-conditions are met.
+	if len(req.TargetCert) == 0 {
+		return BadRequest(fmt.Errorf("No target cluster node certificate provided"))
+	}
+	address := daemonConfig["core.https_address"].Get()
+	if address == "" {
+		return BadRequest(fmt.Errorf("No core.https_address config key is set on this node"))
+	}
+
+	// Client parameters to connect to the target cluster node.
+	args := &lxd.ConnectionArgs{
+		TLSServerCert: string(req.TargetCert),
+		TLSCA:         string(req.TargetCA),
+	}
+
+	// Asynchronously join the cluster.
+	run := func(op *operation) error {
+		// First request for this node to be added to the list of
+		// cluster nodes.
+		client, err := lxd.ConnectLXD(req.TargetAddress, args)
+		if err != nil {
+			return err
+		}
+		info, err := client.AcceptNode(
+			req.TargetPassword, req.Name, address, cluster.SchemaVersion,
+			len(version.APIExtensions))
+		if err != nil {
+			return errors.Wrap(err, "failed to request to add node")
+		}
+
+		// Update our TLS configuration using the returned cluster certificate.
+		err = util.WriteCert(d.os.VarDir, "cluster", req.TargetCert, info.PrivateKey, req.TargetCA)
+		if err != nil {
+			return errors.Wrap(err, "failed to save cluster certificate")
+		}
+		cert, err := util.LoadCert(d.os.VarDir)
+		if err != nil {
+			return errors.Wrap(err, "failed to parse cluster certificate")
+		}
+		d.endpoints.NetworkUpdateCert(cert)
+
+		// Update local setup and possibly join the raft dqlite
+		// cluster.
+		nodes := make([]db.RaftNode, len(info.RaftNodes))
+		for i, node := range info.RaftNodes {
+			nodes[i].ID = node.ID
+			nodes[i].Address = node.Address
+		}
+		return cluster.Join(d.State(), d.gateway, cert, req.Name, nodes)
+	}
 	resources := map[string][]string{}
 	resources["cluster"] = []string{}
 
diff --git a/lxd/endpoints/network.go b/lxd/endpoints/network.go
index 5da1bc573..6d6ddb42d 100644
--- a/lxd/endpoints/network.go
+++ b/lxd/endpoints/network.go
@@ -22,6 +22,15 @@ func (e *Endpoints) NetworkPublicKey() []byte {
 	return e.cert.PublicKey()
 }
 
+// NetworkPrivateKey returns the private key of the TLS certificate used by the
+// network endpoint.
+func (e *Endpoints) NetworkPrivateKey() []byte {
+	e.mu.RLock()
+	defer e.mu.RUnlock()
+
+	return e.cert.PrivateKey()
+}
+
 // NetworkAddress returns the network address of the network endpoint, or an
 // empty string if there's no network endpoint
 func (e *Endpoints) NetworkAddress() string {
diff --git a/lxd/main_init.go b/lxd/main_init.go
index 43cf13bed..aa60ac8aa 100644
--- a/lxd/main_init.go
+++ b/lxd/main_init.go
@@ -508,7 +508,10 @@ func (cmd *CmdInit) initCluster(client lxd.ContainerServer, cluster api.ClusterP
 	if err != nil {
 		return nil, err
 	}
-	op.Wait()
+	err = op.Wait()
+	if err != nil {
+		return nil, err
+	}
 	return reverter, nil
 }
 
diff --git a/lxd/util/encryption.go b/lxd/util/encryption.go
index 43e7aecaf..cb5f939ea 100644
--- a/lxd/util/encryption.go
+++ b/lxd/util/encryption.go
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"encoding/hex"
 	"fmt"
+	"io/ioutil"
 	"path/filepath"
 
 	"github.com/lxc/lxd/shared"
@@ -51,3 +52,26 @@ func LoadCert(dir string) (*shared.CertInfo, error) {
 	}
 	return cert, nil
 }
+
+// WriteCert writes the given material to the appropriate certificate files in
+// the given LXD var directory.
+func WriteCert(dir, prefix string, cert, key, ca []byte) error {
+	err := ioutil.WriteFile(filepath.Join(dir, prefix+".crt"), cert, 0644)
+	if err != nil {
+		return err
+	}
+
+	err = ioutil.WriteFile(filepath.Join(dir, prefix+".key"), key, 0600)
+	if err != nil {
+		return err
+	}
+
+	if ca != nil {
+		err = ioutil.WriteFile(filepath.Join(dir, prefix+".ca"), ca, 0644)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/shared/api/cluster.go b/shared/api/cluster.go
index 847264214..4f54d2ada 100644
--- a/shared/api/cluster.go
+++ b/shared/api/cluster.go
@@ -5,5 +5,24 @@ package api
 //
 // API extension: cluster
 type ClusterPost struct {
-	Name string `json:"name" yaml:"name"`
+	Name           string `json:"name" yaml:"name"`
+	Address        string `json:"address" yaml:"address"`
+	Schema         int    `json:"schema" yaml:"schema"`
+	API            int    `json:"api" yaml:"api"`
+	TargetAddress  string `json:"target_address" yaml:"target_address"`
+	TargetCert     []byte `json:"target_cert" yaml:"target_cert"`
+	TargetCA       []byte `json:"target_ca" yaml:"target_ca"`
+	TargetPassword string `json:"target_password" yaml:"target_password"`
+}
+
+// ClusterNodeAccepted represents the response of a request to join a cluster.
+type ClusterNodeAccepted struct {
+	RaftNodes  []RaftNode `json:"raft_nodes" yaml:"raft_nodes"`
+	PrivateKey []byte     `json:"private_key" yaml:"private_key"`
+}
+
+// RaftNode represents a LXD node that is part of the dqlite raft cluster.
+type RaftNode struct {
+	ID      int64  `json:"id" yaml:"id"`
+	Address string `json:"address" yaml:"address"`
 }

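With this patch a single POST /cluster endpoint serves three different requests, distinguished only by which ClusterPost fields are filled in: bootstrap and join require a trusted client, while accept is gated by the trust password instead. A sketch of the dispatch rule:

package main

import "fmt"

// dispatch mirrors the branching in clusterPost.
func dispatch(address, targetAddress string) string {
	switch {
	case address == "" && targetAddress == "":
		return "bootstrap" // trusted clients only
	case targetAddress == "":
		return "accept" // gated by core.trust_password
	default:
		return "join" // trusted clients only
	}
}

func main() {
	fmt.Println(dispatch("", ""))              // bootstrap
	fmt.Println(dispatch("10.0.0.2:8443", "")) // accept
	fmt.Println(dispatch("", "10.0.0.1:8443")) // join
}
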
From e490291738c8e84eaeb2b70759a58c37a708ce26 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 16 Oct 2017 21:39:02 +0000
Subject: [PATCH 027/227] Change lxd init to support joining a cluster

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_cluster.go |  2 +-
 lxd/main_init.go   | 95 ++++++++++++++++++++++++++++++++++++++++++------------
 2 files changed, 75 insertions(+), 22 deletions(-)

diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index 6cfcd4606..62d9fab0e 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -109,7 +109,7 @@ func clusterPostJoin(d *Daemon, req api.ClusterPost) Response {
 	run := func(op *operation) error {
 		// First request for this node to be added to the list of
 		// cluster nodes.
-		client, err := lxd.ConnectLXD(req.TargetAddress, args)
+		client, err := lxd.ConnectLXD(fmt.Sprintf("https://%s", req.TargetAddress), args)
 		if err != nil {
 			return err
 		}
diff --git a/lxd/main_init.go b/lxd/main_init.go
index aa60ac8aa..4cd6a6f94 100644
--- a/lxd/main_init.go
+++ b/lxd/main_init.go
@@ -1,6 +1,7 @@
 package main
 
 import (
+	"encoding/pem"
 	"fmt"
 	"net"
 	"os"
@@ -136,17 +137,31 @@ func (cmd *CmdInit) fillDataInteractive(data *cmdInitData, client lxd.ContainerS
 	if err != nil {
 		return err
 	}
-	storage, err := cmd.askStorage(client, existingPools, backendsAvailable)
-	if err != nil {
-		return err
-	}
-	defaultPrivileged := cmd.askDefaultPrivileged()
 
-	// Ask about networking only if we skipped the clustering questions.
+	// Ask to create basic entities only if we are not joining an existing
+	// cluster.
+	var storage *cmdInitStorageParams
+	var defaultPrivileged int
 	var networking *cmdInitNetworkingParams
-	if clustering == nil {
-		networking = cmd.askNetworking()
-	} else {
+	var imagesAutoUpdate bool
+	var bridge *cmdInitBridgeParams
+
+	if clustering == nil || clustering.TargetAddress == "" {
+		storage, err = cmd.askStorage(client, existingPools, backendsAvailable)
+		if err != nil {
+			return err
+		}
+		defaultPrivileged = cmd.askDefaultPrivileged()
+
+		// Ask about networking only if we skipped the clustering questions.
+		if clustering == nil {
+			networking = cmd.askNetworking()
+		}
+
+		imagesAutoUpdate = cmd.askImages()
+		bridge = cmd.askBridge(client)
+	}
+	if clustering != nil {
 		// Re-use the answers to the clustering questions.
 		networking = &cmdInitNetworkingParams{
 			Address:       clustering.Address,
@@ -155,9 +170,6 @@ func (cmd *CmdInit) fillDataInteractive(data *cmdInitData, client lxd.ContainerS
 		}
 	}
 
-	imagesAutoUpdate := cmd.askImages()
-	bridge := cmd.askBridge(client)
-
 	_, err = exec.LookPath("dnsmasq")
 	if err != nil && bridge != nil {
 		return fmt.Errorf("LXD managed bridges require \"dnsmasq\". Install it and try again.")
@@ -225,6 +237,9 @@ func (cmd *CmdInit) fillDataWithClustering(data *cmdInitData, clustering *cmdIni
 		return
 	}
 	data.Cluster.Name = clustering.Name
+	data.Cluster.TargetAddress = clustering.TargetAddress
+	data.Cluster.TargetCert = clustering.TargetCert
+	data.Cluster.TargetPassword = clustering.TargetPassword
 }
 
 // Fill the given init data with a new storage pool structure matching the
@@ -504,9 +519,19 @@ func (cmd *CmdInit) initConfig(client lxd.ContainerServer, config map[string]int
 // Turn on clustering.
 func (cmd *CmdInit) initCluster(client lxd.ContainerServer, cluster api.ClusterPost) (reverter, error) {
 	var reverter func() error
-	op, err := client.BootstrapCluster(cluster.Name)
-	if err != nil {
-		return nil, err
+	var op *lxd.Operation
+	var err error
+	if cluster.TargetAddress == "" {
+		op, err = client.BootstrapCluster(cluster.Name)
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		op, err = client.JoinCluster(
+			cluster.TargetAddress, cluster.TargetPassword, cluster.TargetCert, cluster.Name)
+		if err != nil {
+			return nil, err
+		}
 	}
 	err = op.Wait()
 	if err != nil {
@@ -762,7 +787,33 @@ func (cmd *CmdInit) askClustering() (*cmdInitClusteringParams, error) {
 		return params, nil
 	}
 
-	return nil, fmt.Errorf("joining cluster not yet implemented")
+	// Target node address, password and certificate.
+join:
+	params.TargetAddress = cmd.Context.AskString("IP address or FQDN of an existing cluster node: ", "", nil)
+	params.TargetPassword = cmd.Context.AskPassword(
+		"Trust password for the existing cluster: ", cmd.PasswordReader)
+
+	url := fmt.Sprintf("https://%s", params.TargetAddress)
+	certificate, err := shared.GetRemoteCertificate(url)
+	if err != nil {
+		cmd.Context.Output("Error connecting to existing cluster node: %v\n", err)
+		goto join
+	}
+	digest := shared.CertFingerprint(certificate)
+	askFingerprint := fmt.Sprintf("Remote node fingerprint: %s ok (y/n)? ", digest)
+	if !cmd.Context.AskBool(askFingerprint, "") {
+		return nil, fmt.Errorf("Cluster certificate NACKed by user")
+	}
+	params.TargetCert = pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certificate.Raw})
+
+	// Confirm wipe this node
+	askConfirm := ("All existing data is lost when joining a cluster, " +
+		"continue? (yes/no) [default=no] ")
+	if !cmd.Context.AskBool(askConfirm, "") {
+		return nil, fmt.Errorf("User did not confirm erasing data")
+	}
+
+	return params, nil
 }
 
 // Ask if the user wants to create a new storage pool, and return
@@ -1040,11 +1091,13 @@ type cmdInitData struct {
 
 // Parameters needed when enabling clustering in interactive mode.
 type cmdInitClusteringParams struct {
-	Name          string // Name of the new node
-	Address       string // Network address of the new node
-	Port          int64  // Network port of the new node
-	Join          string // Network address of existing node to join.
-	TrustPassword string // Trust password
+	Name           string // Name of the new node
+	Address        string // Network address of the new node
+	Port           int64  // Network port of the new node
+	TrustPassword  string // Trust password
+	TargetAddress  string // Network address of cluster node to join.
+	TargetCert     []byte // Public key of the cluster to join.
+	TargetPassword string // Trust password of the cluster to join.
 }
 
 // Parameters needed when creating a storage pool in interactive or auto

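The interactive join path pins the remote certificate: it fetches the certificate over TLS, asks the user to confirm its fingerprint, and only then PEM-encodes it into the join request. A sketch of that fetch-and-fingerprint step using the same shared helpers referenced above (the address is hypothetical and error handling is condensed):

package main

import (
	"encoding/pem"
	"fmt"
	"log"

	"github.com/lxc/lxd/shared"
)

func main() {
	// Fetch the certificate of the existing cluster node.
	cert, err := shared.GetRemoteCertificate("https://10.0.0.1:8443")
	if err != nil {
		log.Fatal(err)
	}

	// The user is asked to confirm this digest before proceeding.
	fmt.Println("fingerprint:", shared.CertFingerprint(cert))

	// Once confirmed, the raw certificate is PEM-encoded and sent as
	// TargetCert in the ClusterPost request.
	targetCert := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw})
	fmt.Println(len(targetCert), "bytes of PEM")
}
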
From 92315e3fc4cec8267b4cf8ae524a0c210dd3ed25 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 17 Oct 2017 11:58:56 +0000
Subject: [PATCH 028/227] Add config table to cluster schema

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/cluster/schema.go      |  8 +++++++-
 lxd/db/cluster/update.go      | 14 ++++++++++++++
 lxd/db/cluster/update_test.go | 13 +++++++++++++
 3 files changed, 34 insertions(+), 1 deletion(-)

diff --git a/lxd/db/cluster/schema.go b/lxd/db/cluster/schema.go
index 90a358e96..76302fbf7 100644
--- a/lxd/db/cluster/schema.go
+++ b/lxd/db/cluster/schema.go
@@ -6,6 +6,12 @@ package cluster
 // modify the database schema, please add a new schema update to update.go
 // and then run 'make update-schema'.
 const freshSchema = `
+CREATE TABLE config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    key VARCHAR(255) NOT NULL,
+    value TEXT,
+    UNIQUE (key)
+);
 CREATE TABLE nodes (
     id INTEGER PRIMARY KEY,
     name TEXT NOT NULL,
@@ -18,5 +24,5 @@ CREATE TABLE nodes (
     UNIQUE (address)
 );
 
-INSERT INTO schema (version, updated_at) VALUES (1, strftime("%s"))
+INSERT INTO schema (version, updated_at) VALUES (2, strftime("%s"))
 `
diff --git a/lxd/db/cluster/update.go b/lxd/db/cluster/update.go
index 3d43e9b2e..33006db06 100644
--- a/lxd/db/cluster/update.go
+++ b/lxd/db/cluster/update.go
@@ -24,8 +24,22 @@ var SchemaVersion = len(updates)
 
 var updates = map[int]schema.Update{
 	1: updateFromV0,
+	2: updateFromV1,
 }
 
+func updateFromV1(tx *sql.Tx) error {
+	// config table
+	stmt := `
+CREATE TABLE config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    key VARCHAR(255) NOT NULL,
+    value TEXT,
+    UNIQUE (key)
+);
+`
+	_, err := tx.Exec(stmt)
+	return err
+}
 func updateFromV0(tx *sql.Tx) error {
 	// v0..v1 the dawn of clustering
 	stmt := `
diff --git a/lxd/db/cluster/update_test.go b/lxd/db/cluster/update_test.go
index c80a51574..f637f5083 100644
--- a/lxd/db/cluster/update_test.go
+++ b/lxd/db/cluster/update_test.go
@@ -24,3 +24,16 @@ func TestUpdateFromV0(t *testing.T) {
 	_, err = db.Exec("INSERT INTO nodes VALUES (3, 'bar', 'gasp', '1.2.3.4:666', 9, 11), ?)", time.Now())
 	require.Error(t, err)
 }
+
+func TestUpdateFromV1(t *testing.T) {
+	schema := cluster.Schema()
+	db, err := schema.ExerciseUpdate(2, nil)
+	require.NoError(t, err)
+
+	_, err = db.Exec("INSERT INTO config VALUES (1, 'foo', 'blah')")
+	require.NoError(t, err)
+
+	// Unique constraint on key.
+	_, err = db.Exec("INSERT INTO config VALUES (2, 'foo', 'gosh')")
+	require.Error(t, err)
+}

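The new config table enforces key uniqueness, which TestUpdateFromV1 exercises above. A standalone sketch of the same constraint against plain SQLite (using the mattn/go-sqlite3 driver as a stand-in; the real code runs through the dqlite driver):

package main

import (
	"database/sql"
	"log"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	_, err = db.Exec(`CREATE TABLE config (
    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
    key VARCHAR(255) NOT NULL,
    value TEXT,
    UNIQUE (key)
)`)
	if err != nil {
		log.Fatal(err)
	}

	// First insert succeeds; a duplicate key must violate UNIQUE.
	if _, err := db.Exec("INSERT INTO config(key, value) VALUES ('foo', 'blah')"); err != nil {
		log.Fatal(err)
	}
	if _, err := db.Exec("INSERT INTO config(key, value) VALUES ('foo', 'gosh')"); err == nil {
		log.Fatal("expected a UNIQUE constraint violation")
	}
}
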
From a7435d0201b003decadcc427a3c95d25a13f7366 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 17 Oct 2017 12:37:16 +0000
Subject: [PATCH 029/227] Always use the node db to get core.https_address

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_1.0.go               |  7 ++++++-
 lxd/api_cluster.go           |  6 +++++-
 lxd/daemon.go                |  6 +++++-
 lxd/main_activateifneeded.go |  8 ++++----
 lxd/main_init_test.go        | 16 ++++++++++------
 lxd/node/config.go           | 15 +++++++++++++++
 lxd/node/config_test.go      | 24 ++++++++++++++++++++++++
 7 files changed, 69 insertions(+), 13 deletions(-)

diff --git a/lxd/api_1.0.go b/lxd/api_1.0.go
index 501e21dcc..720892450 100644
--- a/lxd/api_1.0.go
+++ b/lxd/api_1.0.go
@@ -9,6 +9,7 @@ import (
 	"gopkg.in/lxc/go-lxc.v2"
 
 	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/node"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
@@ -85,7 +86,11 @@ func api10Get(d *Daemon, r *http.Request) Response {
 		return InternalError(err)
 	}
 
-	addresses, err := util.ListenAddresses(daemonConfig["core.https_address"].Get())
+	address, err := node.HTTPSAddress(d.db)
+	if err != nil {
+		return InternalError(err)
+	}
+	addresses, err := util.ListenAddresses(address)
 	if err != nil {
 		return InternalError(err)
 	}
diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index 62d9fab0e..00ff4a0ff 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -8,6 +8,7 @@ import (
 	lxd "github.com/lxc/lxd/client"
 	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/node"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared/api"
 	"github.com/lxc/lxd/shared/version"
@@ -94,7 +95,10 @@ func clusterPostJoin(d *Daemon, req api.ClusterPost) Response {
 	if len(req.TargetCert) == 0 {
 		return BadRequest(fmt.Errorf("No target cluster node certificate provided"))
 	}
-	address := daemonConfig["core.https_address"].Get()
+	address, err := node.HTTPSAddress(d.db)
+	if err != nil {
+		return InternalError(err)
+	}
 	if address == "" {
 		return BadRequest(fmt.Errorf("No core.https_address config key is set on this node"))
 	}
diff --git a/lxd/daemon.go b/lxd/daemon.go
index e13675cdf..d440a725f 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -30,6 +30,7 @@ import (
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/endpoints"
 	"github.com/lxc/lxd/lxd/maas"
+	"github.com/lxc/lxd/lxd/node"
 	"github.com/lxc/lxd/lxd/state"
 	"github.com/lxc/lxd/lxd/sys"
 	"github.com/lxc/lxd/lxd/task"
@@ -404,7 +405,10 @@ func (d *Daemon) init() error {
 		}
 	}
 
-	address := daemonConfig["core.https_address"].Get()
+	address, err := node.HTTPSAddress(d.db)
+	if err != nil {
+		return errors.Wrap(err, "failed to fetch node address")
+	}
 
 	/* Open the cluster database */
 	d.cluster, err = db.OpenCluster("db.bin", d.gateway.Dialer(), address)
diff --git a/lxd/main_activateifneeded.go b/lxd/main_activateifneeded.go
index 17174d5bd..e0d997e93 100644
--- a/lxd/main_activateifneeded.go
+++ b/lxd/main_activateifneeded.go
@@ -6,6 +6,7 @@ import (
 
 	"github.com/lxc/lxd/client"
 	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/node"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/idmap"
 	"github.com/lxc/lxd/shared/logger"
@@ -30,15 +31,14 @@ func cmdActivateIfNeeded(args *Args) error {
 		return err
 	}
 
-	/* Load all config values from the database */
-	err = daemonConfigInit(d.db.DB())
+	/* Load the configured address from the database */
+	address, err := node.HTTPSAddress(d.db)
 	if err != nil {
 		return err
 	}
 
 	// Look for network socket
-	value := daemonConfig["core.https_address"].Get()
-	if value != "" {
+	if address != "" {
 		logger.Debugf("Daemon has core.https_address set, activating...")
 		_, err := lxd.ConnectLXDUnix("", nil)
 		return err
diff --git a/lxd/main_init_test.go b/lxd/main_init_test.go
index 36829b5d4..8b3a2ad9a 100644
--- a/lxd/main_init_test.go
+++ b/lxd/main_init_test.go
@@ -8,6 +8,7 @@ import (
 	"testing"
 
 	"github.com/lxc/lxd/client"
+	"github.com/lxc/lxd/lxd/node"
 	"github.com/lxc/lxd/lxd/util"
 
 	"github.com/lxc/lxd/shared"
@@ -87,8 +88,9 @@ func (suite *cmdInitTestSuite) TestCmdInit_PreseedHTTPSAddressAndTrustPassword()
 `, port))
 	suite.Req.Nil(suite.command.Run())
 
-	key, _ := daemonConfig["core.https_address"]
-	suite.Req.Equal(fmt.Sprintf("127.0.0.1:%d", port), key.Get())
+	address, err := node.HTTPSAddress(suite.d.db)
+	suite.Req.NoError(err)
+	suite.Req.Equal(fmt.Sprintf("127.0.0.1:%d", port), address)
 	secret := daemonConfig["core.trust_password"].Get()
 	suite.Req.Nil(util.PasswordCheck(secret, "sekret"))
 }
@@ -109,8 +111,9 @@ func (suite *cmdInitTestSuite) TestCmdInit_InteractiveHTTPSAddressAndTrustPasswo
 
 	suite.Req.Nil(suite.command.Run())
 
-	key, _ := daemonConfig["core.https_address"]
-	suite.Req.Equal(fmt.Sprintf("127.0.0.1:%d", port), key.Get())
+	address, err := node.HTTPSAddress(suite.d.db)
+	suite.Req.NoError(err)
+	suite.Req.Equal(fmt.Sprintf("127.0.0.1:%d", port), address)
 	secret := daemonConfig["core.trust_password"].Get()
 	suite.Req.Nil(util.PasswordCheck(secret, "sekret"))
 }
@@ -147,8 +150,9 @@ func (suite *cmdInitTestSuite) TestCmdInit_AutoHTTPSAddressAndTrustPassword() {
 
 	suite.Req.Nil(suite.command.Run())
 
-	key, _ := daemonConfig["core.https_address"]
-	suite.Req.Equal(fmt.Sprintf("127.0.0.1:%d", port), key.Get())
+	address, err := node.HTTPSAddress(suite.d.db)
+	suite.Req.NoError(err)
+	suite.Req.Equal(fmt.Sprintf("127.0.0.1:%d", port), address)
 	secret := daemonConfig["core.trust_password"].Get()
 	suite.Req.Nil(util.PasswordCheck(secret, "sekret"))
 }
diff --git a/lxd/node/config.go b/lxd/node/config.go
index 7aa6371ba..8e305d5b0 100644
--- a/lxd/node/config.go
+++ b/lxd/node/config.go
@@ -57,6 +57,21 @@ func (c *Config) Patch(patch map[string]interface{}) error {
 	return c.update(values)
 }
 
+// HTTPSAddress is a convenience for loading the node configuration and
+// returning the value of core.https_address.
+func HTTPSAddress(node *db.Node) (string, error) {
+	var config *Config
+	err := node.Transaction(func(tx *db.NodeTx) error {
+		var err error
+		config, err = ConfigLoad(tx)
+		return err
+	})
+	if err != nil {
+		return "", err
+	}
+	return config.HTTPSAddress(), nil
+}
+
 func (c *Config) update(values map[string]interface{}) error {
 	changed, err := c.m.Change(values)
 	if err != nil {
diff --git a/lxd/node/config_test.go b/lxd/node/config_test.go
index 7a701b204..b7ed60768 100644
--- a/lxd/node/config_test.go
+++ b/lxd/node/config_test.go
@@ -92,3 +92,27 @@ func TestConfig_PatchKeepsValues(t *testing.T) {
 	require.NoError(t, err)
 	assert.Equal(t, map[string]string{"core.https_address": "127.0.0.1:666"}, values)
 }
+
+// The core.https_address config key is fetched from the db with a new
+// transaction.
+func TestHTTPSAddress(t *testing.T) {
+	nodeDB, cleanup := db.NewTestNode(t)
+	defer cleanup()
+
+	address, err := node.HTTPSAddress(nodeDB)
+	require.NoError(t, err)
+	assert.Equal(t, "", address)
+
+	err = nodeDB.Transaction(func(tx *db.NodeTx) error {
+		config, err := node.ConfigLoad(tx)
+		require.NoError(t, err)
+		err = config.Replace(map[string]interface{}{"core.https_address": "127.0.0.1:666"})
+		require.NoError(t, err)
+		return nil
+	})
+	require.NoError(t, err)
+
+	address, err = node.HTTPSAddress(nodeDB)
+	require.NoError(t, err)
+	assert.Equal(t, "127.0.0.1:666", address)
+}

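The new node.HTTPSAddress helper hides the transaction boilerplate, so call sites reduce to a sketch like this (error handling as in the patch):

	address, err := node.HTTPSAddress(d.db)
	if err != nil {
		return err
	}
	if address != "" {
		// An HTTPS listener is configured for this node.
	}
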
From 435c71a74e15872adbdb866cc47d4fc967bc81cb Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 17 Oct 2017 13:43:47 +0000
Subject: [PATCH 030/227] Delete core.https_address from daemonConfig

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_1.0.go       | 45 ++++++++++++++++++++++++++++++++++++++++++---
 lxd/daemon_config.go | 35 +++++++++++++++++++++++------------
 lxd/daemon_test.go   |  6 ++++--
 lxd/db/config.go     |  5 +++++
 4 files changed, 74 insertions(+), 17 deletions(-)

diff --git a/lxd/api_1.0.go b/lxd/api_1.0.go
index 720892450..ea0a15d3f 100644
--- a/lxd/api_1.0.go
+++ b/lxd/api_1.0.go
@@ -8,6 +8,7 @@ import (
 
 	"gopkg.in/lxc/go-lxc.v2"
 
+	"github.com/lxc/lxd/lxd/config"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/node"
 	"github.com/lxc/lxd/lxd/util"
@@ -15,6 +16,7 @@ import (
 	"github.com/lxc/lxd/shared/api"
 	"github.com/lxc/lxd/shared/osarch"
 	"github.com/lxc/lxd/shared/version"
+	"github.com/pkg/errors"
 )
 
 var api10 = []Command{
@@ -146,7 +148,10 @@ func api10Get(d *Daemon, r *http.Request) Response {
 
 	fullSrv := api.Server{ServerUntrusted: srv}
 	fullSrv.Environment = env
-	fullSrv.Config = daemonConfigRender()
+	fullSrv.Config, err = daemonConfigRender(d.State())
+	if err != nil {
+		return InternalError(err)
+	}
 
 	return SyncResponseETag(true, fullSrv, fullSrv.Config)
 }
@@ -157,7 +162,11 @@ func api10Put(d *Daemon, r *http.Request) Response {
 		return SmartError(err)
 	}
 
-	err = util.EtagCheck(r, daemonConfigRender())
+	render, err := daemonConfigRender(d.State())
+	if err != nil {
+		return InternalError(err)
+	}
+	err = util.EtagCheck(r, render)
 	if err != nil {
 		return PreconditionFailed(err)
 	}
@@ -176,7 +185,11 @@ func api10Patch(d *Daemon, r *http.Request) Response {
 		return SmartError(err)
 	}
 
-	err = util.EtagCheck(r, daemonConfigRender())
+	render, err := daemonConfigRender(d.State())
+	if err != nil {
+		return InternalError(err)
+	}
+	err = util.EtagCheck(r, render)
 	if err != nil {
 		return PreconditionFailed(err)
 	}
@@ -201,6 +214,32 @@ func api10Patch(d *Daemon, r *http.Request) Response {
 }
 
 func doApi10Update(d *Daemon, oldConfig map[string]string, req api.ServerPut) Response {
+	// The HTTPS address is the only config key that we want to save in the
+	// node-level database, so handle it here.
+	nodeValues := map[string]interface{}{}
+	address, ok := req.Config["core.https_address"]
+	if ok {
+		nodeValues["core.https_address"] = address
+		delete(req.Config, "core.https_address")
+	}
+	err := d.db.Transaction(func(tx *db.NodeTx) error {
+		trigger := config.Trigger{
+			Key: "core.https_address",
+			Func: func(value string) error {
+				return d.endpoints.NetworkUpdateAddress(value)
+			},
+		}
+		config, err := node.ConfigLoad(tx, trigger)
+		if err != nil {
+			return errors.Wrap(err, "failed to load node config")
+		}
+		err = config.Replace(nodeValues)
+		return err
+	})
+	if err != nil {
+		return InternalError(err)
+	}
+
 	// Deal with special keys
 	for k, v := range req.Config {
 		config := daemonConfig[k]
diff --git a/lxd/daemon_config.go b/lxd/daemon_config.go
index 741e9a624..9e83c1a56 100644
--- a/lxd/daemon_config.go
+++ b/lxd/daemon_config.go
@@ -15,6 +15,8 @@ import (
 	"golang.org/x/crypto/scrypt"
 
 	dbapi "github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/node"
+	"github.com/lxc/lxd/lxd/state"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/logger"
 )
@@ -180,7 +182,6 @@ func (k *daemonConfigKey) GetInt64() int64 {
 func daemonConfigInit(db *sql.DB) error {
 	// Set all the keys
 	daemonConfig = map[string]*daemonConfigKey{
-		"core.https_address":             {valueType: "string", setter: daemonConfigSetAddress},
 		"core.https_allowed_headers":     {valueType: "string"},
 		"core.https_allowed_methods":     {valueType: "string"},
 		"core.https_allowed_origin":      {valueType: "string"},
@@ -232,7 +233,7 @@ func daemonConfigInit(db *sql.DB) error {
 	return nil
 }
 
-func daemonConfigRender() map[string]interface{} {
+func daemonConfigRender(state *state.State) (map[string]interface{}, error) {
 	config := map[string]interface{}{}
 
 	// Turn the config into a JSON-compatible map
@@ -247,7 +248,26 @@ func daemonConfigRender() map[string]interface{} {
 		}
 	}
 
-	return config
+	err := state.Node.Transaction(func(tx *dbapi.NodeTx) error {
+		nodeConfig, err := node.ConfigLoad(tx)
+		if err != nil {
+			return err
+		}
+		for key, value := range nodeConfig.Dump() {
+			// FIXME: we can drop this conditional as soon as we
+			//        migrate all non-node-local keys to the cluster db
+			if key != "core.https_address" {
+				continue
+			}
+			config[key] = value
+		}
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return config, nil
 }
 
 func daemonConfigSetPassword(d *Daemon, key string, value string) (string, error) {
@@ -274,15 +294,6 @@ func daemonConfigSetPassword(d *Daemon, key string, value string) (string, error
 	return value, nil
 }
 
-func daemonConfigSetAddress(d *Daemon, key string, value string) (string, error) {
-	err := d.endpoints.NetworkUpdateAddress(value)
-	if err != nil {
-		return "", err
-	}
-
-	return value, nil
-}
-
 func daemonConfigSetMacaroonEndpoint(d *Daemon, key string, value string) (string, error) {
 	err := d.setupExternalAuthentication(value)
 	if err != nil {
diff --git a/lxd/daemon_test.go b/lxd/daemon_test.go
index fa98ec393..9ce47e92b 100644
--- a/lxd/daemon_test.go
+++ b/lxd/daemon_test.go
@@ -20,7 +20,8 @@ func (suite *daemonTestSuite) Test_config_value_set_empty_removes_val() {
 	val := daemonConfig["core.trust_password"].Get()
 	suite.Req.Equal(len(val), 192)
 
-	valMap := daemonConfigRender()
+	valMap, err := daemonConfigRender(d.State())
+	suite.Req.NoError(err)
 	value, present := valMap["core.trust_password"]
 	suite.Req.True(present)
 	suite.Req.Equal(value, true)
@@ -31,7 +32,8 @@ func (suite *daemonTestSuite) Test_config_value_set_empty_removes_val() {
 	val = daemonConfig["core.trust_password"].Get()
 	suite.Req.Equal(val, "")
 
-	valMap = daemonConfigRender()
+	valMap, err = daemonConfigRender(d.State())
+	suite.Req.NoError(err)
 	_, present = valMap["core.trust_password"]
 	suite.Req.False(present)
 }
diff --git a/lxd/db/config.go b/lxd/db/config.go
index e068c75ef..83eff3437 100644
--- a/lxd/db/config.go
+++ b/lxd/db/config.go
@@ -30,6 +30,11 @@ func ConfigValuesGet(db *sql.DB) (map[string]string, error) {
 	for rows.Next() {
 		var key, value string
 		rows.Scan(&key, &value)
+		// FIXME: we can get rid of this special casing as soon as we
+		//        move config keys to the cluster database.
+		if key == "core.https_address" {
+			continue
+		}
 		results[key] = value
 	}
 

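The config.Trigger mechanism introduced here replaces the old daemonConfigSetAddress setter: a trigger pairs a key with a function that runs whenever that key changes. Sketched usage, as in doApi10Update above:

	trigger := config.Trigger{
		Key: "core.https_address",
		Func: func(value string) error {
			// Rebind the network endpoint to the new address.
			return d.endpoints.NetworkUpdateAddress(value)
		},
	}
	nodeConfig, err := node.ConfigLoad(tx, trigger)
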
From d34571519dcb74c866a3a794d198ad4993bf7eed Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 17 Oct 2017 15:11:10 +0000
Subject: [PATCH 031/227] Add utilities to migrate data from the node db to the
 cluster db

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/config.go         |  11 ++
 lxd/db/migration.go      | 101 ++++++++++++++++++
 lxd/db/migration_test.go | 260 +++++++++++++++++++++++++++++++++++++++++++++++
 lxd/db/query/config.go   |   2 +-
 lxd/db/query/expr.go     |   8 +-
 lxd/db/query/objects.go  |   2 +-
 6 files changed, 378 insertions(+), 6 deletions(-)
 create mode 100644 lxd/db/migration.go
 create mode 100644 lxd/db/migration_test.go

diff --git a/lxd/db/config.go b/lxd/db/config.go
index 83eff3437..36136ea5b 100644
--- a/lxd/db/config.go
+++ b/lxd/db/config.go
@@ -17,6 +17,17 @@ func (n *NodeTx) UpdateConfig(values map[string]string) error {
 	return query.UpdateConfig(n.tx, "config", values)
 }
 
+// Config fetches all LXD cluster config keys.
+func (c *ClusterTx) Config() (map[string]string, error) {
+	return query.SelectConfig(c.tx, "config")
+}
+
+// UpdateConfig updates the given LXD cluster configuration keys in the
+// config table. Config keys set to empty values will be deleted.
+func (c *ClusterTx) UpdateConfig(values map[string]string) error {
+	return query.UpdateConfig(c.tx, "config", values)
+}
+
 func ConfigValuesGet(db *sql.DB) (map[string]string, error) {
 	q := "SELECT key, value FROM config"
 	rows, err := dbQuery(db, q)
diff --git a/lxd/db/migration.go b/lxd/db/migration.go
new file mode 100644
index 000000000..af9284d10
--- /dev/null
+++ b/lxd/db/migration.go
@@ -0,0 +1,101 @@
+package db
+
+import (
+	"database/sql"
+	"fmt"
+	"strings"
+
+	"github.com/lxc/lxd/lxd/db/query"
+	"github.com/pkg/errors"
+)
+
+// LoadPreClusteringData loads all the data that, before the introduction of
+// LXD clustering, used to live in the node-level database.
+//
+// This is used for performing a one-off data migration when a LXD instance is
+// upgraded from a version without clustering to a version that supports
+// clustering, since in those version all data lives in the cluster database
+// (regardless of whether clustering is actually on or off).
+func LoadPreClusteringData(tx *sql.Tx) (*Dump, error) {
+	// Dump all tables.
+	tables := []string{
+		"config",
+	}
+	dump := &Dump{
+		Schema: map[string][]string{},
+		Data:   map[string][][]interface{}{},
+	}
+	for _, table := range tables {
+		data := [][]interface{}{}
+		stmt := fmt.Sprintf("SELECT * FROM %s", table)
+		rows, err := tx.Query(stmt)
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to fetch rows from %s", table)
+		}
+		columns, err := rows.Columns()
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to get columns of %s", table)
+		}
+		dump.Schema[table] = columns
+
+		for rows.Next() {
+			values := make([]interface{}, len(columns))
+			row := make([]interface{}, len(columns))
+			for i := range values {
+				row[i] = &values[i]
+			}
+			err := rows.Scan(row...)
+			if err != nil {
+				return nil, errors.Wrapf(err, "failed to scan row from %s", table)
+			}
+			data = append(data, values)
+		}
+		err = rows.Err()
+		if err != nil {
+			return nil, errors.Wrapf(err, "error while fetching rows from %s", table)
+		}
+
+		dump.Data[table] = data
+	}
+
+	return dump, nil
+}
+
+// ImportPreClusteringData imports the data loaded with LoadPreClusteringData.
+func (c *Cluster) ImportPreClusteringData(dump *Dump) error {
+	tx, err := c.db.Begin()
+	if err != nil {
+		return errors.Wrap(err, "failed to start cluster database transaction")
+	}
+
+	for table, columns := range dump.Schema {
+		stmt := fmt.Sprintf("INSERT INTO %s(%s)", table, strings.Join(columns, ", "))
+		stmt += fmt.Sprintf(" VALUES %s", query.Params(len(columns)))
+		for i, row := range dump.Data[table] {
+			result, err := tx.Exec(stmt, row...)
+			if err != nil {
+				return errors.Wrapf(err, "failed to insert row %d into %s", i, table)
+			}
+			n, err := result.RowsAffected()
+			if err != nil {
+				return errors.Wrapf(err, "no result count for row %d of %s", i, table)
+			}
+			if n != 1 {
+				return fmt.Errorf("could not insert %d int %s", i, table)
+			}
+		}
+	}
+
+	return tx.Commit()
+}
+
+// Dump is a dump of all the user data in lxd.db prior to the migration to the
+// cluster db.
+type Dump struct {
+	// Map table names to the names of their columns.
+	Schema map[string][]string
+
+	// Map a table name to all the rows it contains. Each row is a slice
+	// of interfaces.
+	Data map[string][][]interface{}
+}
diff --git a/lxd/db/migration_test.go b/lxd/db/migration_test.go
new file mode 100644
index 000000000..0719a8f61
--- /dev/null
+++ b/lxd/db/migration_test.go
@@ -0,0 +1,260 @@
+package db_test
+
+import (
+	"database/sql"
+	"testing"
+
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestLoadPreClusteringData(t *testing.T) {
+	tx := newPreClusteringTx(t)
+
+	dump, err := db.LoadPreClusteringData(tx)
+	require.NoError(t, err)
+
+	assert.Equal(t, []string{"id", "key", "value"}, dump.Schema["config"])
+	assert.Len(t, dump.Data["config"], 1)
+	rows := []interface{}{int64(1), []byte("core.https_address"), []byte("1.2.3.4:666")}
+	assert.Equal(t, rows, dump.Data["config"][0])
+}
+
+func TestImportPreClusteringData(t *testing.T) {
+	tx := newPreClusteringTx(t)
+
+	dump, err := db.LoadPreClusteringData(tx)
+	require.NoError(t, err)
+
+	cluster, cleanup := db.NewTestCluster(t)
+	defer cleanup()
+
+	err = cluster.ImportPreClusteringData(dump)
+	require.NoError(t, err)
+
+	cluster.Transaction(func(tx *db.ClusterTx) error {
+		config, err := tx.Config()
+		require.NoError(t, err)
+		values := map[string]string{"core.https_address": "1.2.3.4:666"}
+		assert.Equal(t, values, config)
+		return nil
+	})
+}
+
+// Return a sql.Tx against a memory database populated with pre-clustering
+// data.
+func newPreClusteringTx(t *testing.T) *sql.Tx {
+	db, err := sql.Open("sqlite3", ":memory:")
+	require.NoError(t, err)
+
+	tx, err := db.Begin()
+	require.NoError(t, err)
+
+	stmts := []string{
+		preClusteringNodeSchema,
+		"INSERT INTO config VALUES(1, 'core.https_address', '1.2.3.4:666')",
+	}
+	for _, stmt := range stmts {
+		_, err := tx.Exec(stmt)
+		require.NoError(t, err)
+	}
+	return tx
+}
+
+const preClusteringNodeSchema = `
+CREATE TABLE schema (
+    id         INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    version    INTEGER NOT NULL,
+    updated_at DATETIME NOT NULL,
+    UNIQUE (version)
+);
+CREATE TABLE certificates (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    fingerprint VARCHAR(255) NOT NULL,
+    type INTEGER NOT NULL,
+    name VARCHAR(255) NOT NULL,
+    certificate TEXT NOT NULL,
+    UNIQUE (fingerprint)
+);
+CREATE TABLE config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    key VARCHAR(255) NOT NULL,
+    value TEXT,
+    UNIQUE (key)
+);
+CREATE TABLE "containers" (
+    id INTEGER primary key AUTOINCREMENT NOT NULL,
+    name VARCHAR(255) NOT NULL,
+    architecture INTEGER NOT NULL,
+    type INTEGER NOT NULL,
+    ephemeral INTEGER NOT NULL DEFAULT 0,
+    creation_date DATETIME NOT NULL DEFAULT 0,
+    stateful INTEGER NOT NULL DEFAULT 0,
+    last_use_date DATETIME,
+    description TEXT,
+    UNIQUE (name)
+);
+CREATE TABLE containers_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    container_id INTEGER NOT NULL,
+    key VARCHAR(255) NOT NULL,
+    value TEXT,
+    FOREIGN KEY (container_id) REFERENCES containers (id) ON DELETE CASCADE,
+    UNIQUE (container_id, key)
+);
+CREATE TABLE containers_devices (
+    id INTEGER primary key AUTOINCREMENT NOT NULL,
+    container_id INTEGER NOT NULL,
+    name VARCHAR(255) NOT NULL,
+    type INTEGER NOT NULL default 0,
+    FOREIGN KEY (container_id) REFERENCES containers (id) ON DELETE CASCADE,
+    UNIQUE (container_id, name)
+);
+CREATE TABLE containers_devices_config (
+    id INTEGER primary key AUTOINCREMENT NOT NULL,
+    container_device_id INTEGER NOT NULL,
+    key VARCHAR(255) NOT NULL,
+    value TEXT,
+    FOREIGN KEY (container_device_id) REFERENCES containers_devices (id) ON DELETE CASCADE,
+    UNIQUE (container_device_id, key)
+);
+CREATE TABLE containers_profiles (
+    id INTEGER primary key AUTOINCREMENT NOT NULL,
+    container_id INTEGER NOT NULL,
+    profile_id INTEGER NOT NULL,
+    apply_order INTEGER NOT NULL default 0,
+    UNIQUE (container_id, profile_id),
+    FOREIGN KEY (container_id) REFERENCES containers(id) ON DELETE CASCADE,
+    FOREIGN KEY (profile_id) REFERENCES profiles(id) ON DELETE CASCADE
+);
+CREATE TABLE images (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    fingerprint VARCHAR(255) NOT NULL,
+    filename VARCHAR(255) NOT NULL,
+    size INTEGER NOT NULL,
+    public INTEGER NOT NULL DEFAULT 0,
+    architecture INTEGER NOT NULL,
+    creation_date DATETIME,
+    expiry_date DATETIME,
+    upload_date DATETIME NOT NULL,
+    cached INTEGER NOT NULL DEFAULT 0,
+    last_use_date DATETIME,
+    auto_update INTEGER NOT NULL DEFAULT 0,
+    UNIQUE (fingerprint)
+);
+CREATE TABLE "images_aliases" (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    name VARCHAR(255) NOT NULL,
+    image_id INTEGER NOT NULL,
+    description TEXT,
+    FOREIGN KEY (image_id) REFERENCES images (id) ON DELETE CASCADE,
+    UNIQUE (name)
+);
+CREATE TABLE images_properties (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    image_id INTEGER NOT NULL,
+    type INTEGER NOT NULL,
+    key VARCHAR(255) NOT NULL,
+    value TEXT,
+    FOREIGN KEY (image_id) REFERENCES images (id) ON DELETE CASCADE
+);
+CREATE TABLE images_source (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    image_id INTEGER NOT NULL,
+    server TEXT NOT NULL,
+    protocol INTEGER NOT NULL,
+    certificate TEXT NOT NULL,
+    alias VARCHAR(255) NOT NULL,
+    FOREIGN KEY (image_id) REFERENCES images (id) ON DELETE CASCADE
+);
+CREATE TABLE networks (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    name VARCHAR(255) NOT NULL,
+    description TEXT,
+    UNIQUE (name)
+);
+CREATE TABLE networks_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    network_id INTEGER NOT NULL,
+    key VARCHAR(255) NOT NULL,
+    value TEXT,
+    UNIQUE (network_id, key),
+    FOREIGN KEY (network_id) REFERENCES networks (id) ON DELETE CASCADE
+);
+CREATE TABLE patches (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    name VARCHAR(255) NOT NULL,
+    applied_at DATETIME NOT NULL,
+    UNIQUE (name)
+);
+CREATE TABLE profiles (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    name VARCHAR(255) NOT NULL,
+    description TEXT,
+    UNIQUE (name)
+);
+CREATE TABLE profiles_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    profile_id INTEGER NOT NULL,
+    key VARCHAR(255) NOT NULL,
+    value VARCHAR(255),
+    UNIQUE (profile_id, key),
+    FOREIGN KEY (profile_id) REFERENCES profiles(id) ON DELETE CASCADE
+);
+CREATE TABLE profiles_devices (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    profile_id INTEGER NOT NULL,
+    name VARCHAR(255) NOT NULL,
+    type INTEGER NOT NULL default 0,
+    UNIQUE (profile_id, name),
+    FOREIGN KEY (profile_id) REFERENCES profiles (id) ON DELETE CASCADE
+);
+CREATE TABLE profiles_devices_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    profile_device_id INTEGER NOT NULL,
+    key VARCHAR(255) NOT NULL,
+    value TEXT,
+    UNIQUE (profile_device_id, key),
+    FOREIGN KEY (profile_device_id) REFERENCES profiles_devices (id) ON DELETE CASCADE
+);
+CREATE TABLE raft_nodes (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    address TEXT NOT NULL,
+    UNIQUE (address)
+);
+CREATE TABLE storage_pools (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    name VARCHAR(255) NOT NULL,
+    driver VARCHAR(255) NOT NULL,
+    description TEXT,
+    UNIQUE (name)
+);
+CREATE TABLE storage_pools_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    storage_pool_id INTEGER NOT NULL,
+    key VARCHAR(255) NOT NULL,
+    value TEXT,
+    UNIQUE (storage_pool_id, key),
+    FOREIGN KEY (storage_pool_id) REFERENCES storage_pools (id) ON DELETE CASCADE
+);
+CREATE TABLE storage_volumes (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    name VARCHAR(255) NOT NULL,
+    storage_pool_id INTEGER NOT NULL,
+    type INTEGER NOT NULL,
+    description TEXT,
+    UNIQUE (storage_pool_id, name, type),
+    FOREIGN KEY (storage_pool_id) REFERENCES storage_pools (id) ON DELETE CASCADE
+);
+CREATE TABLE storage_volumes_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    storage_volume_id INTEGER NOT NULL,
+    key VARCHAR(255) NOT NULL,
+    value TEXT,
+    UNIQUE (storage_volume_id, key),
+    FOREIGN KEY (storage_volume_id) REFERENCES storage_volumes (id) ON DELETE CASCADE
+);
+
+INSERT INTO schema (version, updated_at) VALUES (37, strftime("%s"))
+`
diff --git a/lxd/db/query/config.go b/lxd/db/query/config.go
index f970a405b..878b6d8f0 100644
--- a/lxd/db/query/config.go
+++ b/lxd/db/query/config.go
@@ -98,7 +98,7 @@ func deleteConfig(tx *sql.Tx, table string, keys []string) error {
 		return nil // Nothing to delete.
 	}
 
-	query := fmt.Sprintf("DELETE FROM %s WHERE key IN %s", table, exprParams(n))
+	query := fmt.Sprintf("DELETE FROM %s WHERE key IN %s", table, Params(n))
 	values := make([]interface{}, n)
 	for i, key := range keys {
 		values[i] = key
diff --git a/lxd/db/query/expr.go b/lxd/db/query/expr.go
index 3f249c173..393bc166b 100644
--- a/lxd/db/query/expr.go
+++ b/lxd/db/query/expr.go
@@ -7,10 +7,10 @@ import (
 	"strings"
 )
 
-// Return a parameters expression with the given number of '?'
-// placeholders. E.g. exprParams(2) -> "(?, ?)". Useful for
-// IN expressions.
-func exprParams(n int) string {
+// Params returns a parameters expression with the given number of '?'
+// placeholders. E.g. Params(2) -> "(?, ?)". Useful for IN and VALUES
+// expressions.
+func Params(n int) string {
 	tokens := make([]string, n)
 	for i := 0; i < n; i++ {
 		tokens[i] = "?"
diff --git a/lxd/db/query/objects.go b/lxd/db/query/objects.go
index f6dcdad09..edb628964 100644
--- a/lxd/db/query/objects.go
+++ b/lxd/db/query/objects.go
@@ -51,7 +51,7 @@ func UpsertObject(tx *sql.Tx, table string, columns []string, values []interface
 
 	stmt := fmt.Sprintf(
 		"INSERT OR REPLACE INTO %s (%s) VALUES %s",
-		table, strings.Join(columns, ", "), exprParams(n))
+		table, strings.Join(columns, ", "), Params(n))
 	result, err := tx.Exec(stmt, values...)
 	if err != nil {
 		return -1, err

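Together the two new entry points form the one-off migration path; a minimal sketch of the intended sequence (error handling trimmed; nodeTx is assumed to be an open transaction on the pre-clustering lxd.db, and c a *db.Cluster):

	dump, err := db.LoadPreClusteringData(nodeTx)
	if err != nil {
		return err
	}
	// Replay every dumped row into the cluster database.
	return c.ImportPreClusteringData(dump)
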
From d6fa64ea86cae60354545ec21f85a7c9533faafc Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 17 Oct 2017 16:57:13 +0000
Subject: [PATCH 032/227] Migrate node data to cluster db upon startup

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/daemon.go                | 32 +++++++++++++++++++++-----------
 lxd/db/db.go                 | 33 ++++++++++++++++++++++++++-------
 lxd/db/db_internal_test.go   |  2 +-
 lxd/db/node/update.go        |  4 ++++
 lxd/db/testing.go            |  2 +-
 lxd/main_activateifneeded.go |  7 ++++++-
 lxd/profiles_test.go         |  2 +-
 7 files changed, 60 insertions(+), 22 deletions(-)

diff --git a/lxd/daemon.go b/lxd/daemon.go
index d440a725f..d636a8fc5 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -370,13 +370,7 @@ func (d *Daemon) init() error {
 	}
 
 	/* Initialize the database */
-	err = initializeDbObject(d)
-	if err != nil {
-		return err
-	}
-
-	/* Load all config values from the database */
-	err = daemonConfigInit(d.db.DB())
+	dump, err := initializeDbObject(d)
 	if err != nil {
 		return err
 	}
@@ -430,6 +424,21 @@ func (d *Daemon) init() error {
 		return err
 	}
 
+	/* Migrate the node local data to the cluster database, if needed */
+	if dump != nil {
+		logger.Infof("Migrating data from lxd.db to db.bin")
+		err = d.cluster.ImportPreClusteringData(dump)
+		if err != nil {
+			return fmt.Errorf("Failed to migrate data to db.bin: %v", err)
+		}
+	}
+
+	/* Load all config values from the database */
+	err = daemonConfigInit(d.db.DB())
+	if err != nil {
+		return err
+	}
+
 	/* Read the storage pools */
 	err = SetupStorageDriver(d.State(), false)
 	if err != nil {
@@ -683,7 +692,7 @@ func (d *Daemon) setupMAASController(server string, key string, machine string)
 }
 
 // Create a database connection and perform any updates needed.
-func initializeDbObject(d *Daemon) error {
+func initializeDbObject(d *Daemon) (*db.Dump, error) {
 	// NOTE: we use the legacyPatches parameter to run a few
 	// legacy non-db updates that were in place before the
 	// patches mechanism was introduced in lxd/patches.go. The
@@ -717,10 +726,11 @@ func initializeDbObject(d *Daemon) error {
 		return nil
 	}
 	var err error
-	d.db, err = db.OpenNode(d.os.VarDir, freshHook, legacy)
+	var dump *db.Dump
+	d.db, dump, err = db.OpenNode(d.os.VarDir, freshHook, legacy)
 	if err != nil {
-		return fmt.Errorf("Error creating database: %s", err)
+		return nil, fmt.Errorf("Error creating database: %s", err)
 	}
 
-	return nil
+	return dump, nil
 }
diff --git a/lxd/db/db.go b/lxd/db/db.go
index 6b4a49b6d..356727be3 100644
--- a/lxd/db/db.go
+++ b/lxd/db/db.go
@@ -46,16 +46,35 @@ type Node struct {
 // The legacyPatches parameter is used as a means to apply the legacy V10, V11,
 // V15, V29 and V30 non-db updates during the database upgrade sequence, to
 // avoid any change in semantics wrt the old logic (see PR #3322).
-func OpenNode(dir string, fresh func(*Node) error, legacyPatches map[int]*LegacyPatch) (*Node, error) {
+//
+// Return the newly created Node object, and a Dump of the pre-clustering data
+// if we're migrating to a cluster-aware version.
+func OpenNode(dir string, fresh func(*Node) error, legacyPatches map[int]*LegacyPatch) (*Node, *Dump, error) {
+	// When updating the node database schema we'll detect if we're
+	// transitioning to the dqlite-based database and dump all the data
+	// before purging the schema. This data will then be imported by the
+	// daemon into the dqlite database.
+	var dump *Dump
+
 	db, err := node.Open(dir)
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
 
-	hook := legacyPatchHook(db, legacyPatches)
+	legacyHook := legacyPatchHook(db, legacyPatches)
+	hook := func(version int, tx *sql.Tx) error {
+		if version == node.UpdateFromPreClustering {
+			var err error
+			dump, err = LoadPreClusteringData(tx)
+			if err != nil {
+				return err
+			}
+		}
+		return legacyHook(version, tx)
+	}
 	initial, err := node.EnsureSchema(db, dir, hook)
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
 
 	node := &Node{
@@ -66,17 +85,17 @@ func OpenNode(dir string, fresh func(*Node) error, legacyPatches map[int]*Legacy
 	if initial == 0 {
 		err := node.ProfileCreateDefault()
 		if err != nil {
-			return nil, err
+			return nil, nil, err
 		}
 		if fresh != nil {
 			err := fresh(node)
 			if err != nil {
-				return nil, err
+				return nil, nil, err
 			}
 		}
 	}
 
-	return node, nil
+	return node, dump, nil
 }
 
 // ForLegacyPatches is an aid for the hack in initializeDbObject, which sets
diff --git a/lxd/db/db_internal_test.go b/lxd/db/db_internal_test.go
index bdebdc3e9..9daf779a9 100644
--- a/lxd/db/db_internal_test.go
+++ b/lxd/db/db_internal_test.go
@@ -65,7 +65,7 @@ func (s *dbTestSuite) CreateTestDb() *Node {
 	s.dir, err = ioutil.TempDir("", "lxd-db-test")
 	s.Nil(err)
 
-	db, err := OpenNode(s.dir, nil, nil)
+	db, _, err := OpenNode(s.dir, nil, nil)
 	s.Nil(err)
 	return db
 }
diff --git a/lxd/db/node/update.go b/lxd/db/node/update.go
index 95a660202..ce1dd6b85 100644
--- a/lxd/db/node/update.go
+++ b/lxd/db/node/update.go
@@ -87,6 +87,10 @@ var updates = map[int]schema.Update{
 	37: updateFromV36,
 }
 
+// UpdateFromPreClustering is the last schema version where clustering support
+// was not available, and hence no cluster dqlite database is used.
+const UpdateFromPreClustering = 36
+
 // Schema updates begin here
 
 // Add a raft_nodes table to be used when running in clustered mode. It lists
diff --git a/lxd/db/testing.go b/lxd/db/testing.go
index 65c5ddcae..9f819f5b0 100644
--- a/lxd/db/testing.go
+++ b/lxd/db/testing.go
@@ -20,7 +20,7 @@ func NewTestNode(t *testing.T) (*Node, func()) {
 	dir, err := ioutil.TempDir("", "lxd-db-test-node-")
 	require.NoError(t, err)
 
-	db, err := OpenNode(dir, nil, nil)
+	db, _, err := OpenNode(dir, nil, nil)
 	require.NoError(t, err)
 
 	cleanup := func() {
diff --git a/lxd/main_activateifneeded.go b/lxd/main_activateifneeded.go
index e0d997e93..610103bc3 100644
--- a/lxd/main_activateifneeded.go
+++ b/lxd/main_activateifneeded.go
@@ -1,8 +1,10 @@
 package main
 
 import (
+	"database/sql"
 	"fmt"
 	"os"
+	"path/filepath"
 
 	"github.com/lxc/lxd/client"
 	"github.com/lxc/lxd/lxd/db"
@@ -26,10 +28,13 @@ func cmdActivateIfNeeded(args *Args) error {
 		return nil
 	}
 
-	err := initializeDbObject(d)
+	// Open the database directly to avoid triggering any initialization
+	// code, in particular the data migration from node to cluster db.
+	sqldb, err := sql.Open("sqlite3", filepath.Join(d.os.VarDir, "lxd.db"))
 	if err != nil {
 		return err
 	}
+	d.db = db.ForLegacyPatches(sqldb)
 
 	/* Load the configured address from the database */
 	address, err := node.HTTPSAddress(d.db)
diff --git a/lxd/profiles_test.go b/lxd/profiles_test.go
index b609da126..2f864cee4 100644
--- a/lxd/profiles_test.go
+++ b/lxd/profiles_test.go
@@ -18,7 +18,7 @@ func Test_removing_a_profile_deletes_associated_configuration_entries(t *testing
 	}
 	defer os.RemoveAll(d.os.VarDir)
 
-	err = initializeDbObject(d)
+	_, err = initializeDbObject(d)
 	if err != nil {
 		t.Fatal(err)
 	}

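The ordering is the important part here: the dump is captured by the schema hook while lxd.db is still at the pre-clustering version, and only imported once the cluster database is open. Condensed from Daemon.init above (intermediate steps elided):

	dump, err := initializeDbObject(d) // may capture pre-clustering data
	// ...
	d.cluster, err = db.OpenCluster("db.bin", d.gateway.Dialer(), address)
	// ...
	if dump != nil {
		err = d.cluster.ImportPreClusteringData(dump)
	}
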
From 563e7c5f2cc38341de47c86d5fd72c041569ee4a Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 17 Oct 2017 17:23:43 +0000
Subject: [PATCH 033/227] Load and save config values to cluster db

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_1.0.go       | 14 ++++++++++++--
 lxd/daemon.go        |  2 +-
 lxd/daemon_config.go | 12 ++++++++----
 lxd/db/config.go     | 34 +++-------------------------------
 lxd/patches.go       |  2 +-
 5 files changed, 25 insertions(+), 39 deletions(-)

diff --git a/lxd/api_1.0.go b/lxd/api_1.0.go
index ea0a15d3f..84911cc06 100644
--- a/lxd/api_1.0.go
+++ b/lxd/api_1.0.go
@@ -157,7 +157,12 @@ func api10Get(d *Daemon, r *http.Request) Response {
 }
 
 func api10Put(d *Daemon, r *http.Request) Response {
-	oldConfig, err := db.ConfigValuesGet(d.db.DB())
+	var oldConfig map[string]string
+	err := d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		var err error
+		oldConfig, err = tx.Config()
+		return err
+	})
 	if err != nil {
 		return SmartError(err)
 	}
@@ -180,7 +185,12 @@ func api10Put(d *Daemon, r *http.Request) Response {
 }
 
 func api10Patch(d *Daemon, r *http.Request) Response {
-	oldConfig, err := db.ConfigValuesGet(d.db.DB())
+	var oldConfig map[string]string
+	err := d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		var err error
+		oldConfig, err = tx.Config()
+		return err
+	})
 	if err != nil {
 		return SmartError(err)
 	}
diff --git a/lxd/daemon.go b/lxd/daemon.go
index d636a8fc5..b0f9684a4 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -434,7 +434,7 @@ func (d *Daemon) init() error {
 	}
 
 	/* Load all config values from the database */
-	err = daemonConfigInit(d.db.DB())
+	err = daemonConfigInit(d.cluster)
 	if err != nil {
 		return err
 	}
diff --git a/lxd/daemon_config.go b/lxd/daemon_config.go
index 9e83c1a56..af109d887 100644
--- a/lxd/daemon_config.go
+++ b/lxd/daemon_config.go
@@ -2,7 +2,6 @@ package main
 
 import (
 	"crypto/rand"
-	"database/sql"
 	"encoding/hex"
 	"fmt"
 	"io"
@@ -130,7 +129,7 @@ func (k *daemonConfigKey) Set(d *Daemon, value string) error {
 	k.currentValue = value
 	daemonConfigLock.Unlock()
 
-	err = dbapi.ConfigValueSet(d.db.DB(), name, value)
+	err = dbapi.ConfigValueSet(d.cluster, name, value)
 	if err != nil {
 		return err
 	}
@@ -179,7 +178,7 @@ func (k *daemonConfigKey) GetInt64() int64 {
 	return ret
 }
 
-func daemonConfigInit(db *sql.DB) error {
+func daemonConfigInit(cluster *dbapi.Cluster) error {
 	// Set all the keys
 	daemonConfig = map[string]*daemonConfigKey{
 		"core.https_allowed_headers":     {valueType: "string"},
@@ -213,7 +212,12 @@ func daemonConfigInit(db *sql.DB) error {
 	}
 
 	// Load the values from the DB
-	dbValues, err := dbapi.ConfigValuesGet(db)
+	var dbValues map[string]string
+	err := cluster.Transaction(func(tx *dbapi.ClusterTx) error {
+		var err error
+		dbValues, err = tx.Config()
+		return err
+	})
 	if err != nil {
 		return err
 	}
diff --git a/lxd/db/config.go b/lxd/db/config.go
index 36136ea5b..d76d8188a 100644
--- a/lxd/db/config.go
+++ b/lxd/db/config.go
@@ -1,10 +1,6 @@
 package db
 
-import (
-	"database/sql"
-
-	"github.com/lxc/lxd/lxd/db/query"
-)
+import "github.com/lxc/lxd/lxd/db/query"
 
 // Config fetches all LXD node-level config keys.
 func (n *NodeTx) Config() (map[string]string, error) {
@@ -28,32 +24,8 @@ func (c *ClusterTx) UpdateConfig(values map[string]string) error {
 	return query.UpdateConfig(c.tx, "config", values)
 }
 
-func ConfigValuesGet(db *sql.DB) (map[string]string, error) {
-	q := "SELECT key, value FROM config"
-	rows, err := dbQuery(db, q)
-	if err != nil {
-		return map[string]string{}, err
-	}
-	defer rows.Close()
-
-	results := map[string]string{}
-
-	for rows.Next() {
-		var key, value string
-		rows.Scan(&key, &value)
-		// FIXME: we can get rid of this special casing as soon as we
-		//        move config keys to the cluster database.
-		if key == "core.https_address" {
-			continue
-		}
-		results[key] = value
-	}
-
-	return results, nil
-}
-
-func ConfigValueSet(db *sql.DB, key string, value string) error {
-	tx, err := begin(db)
+func ConfigValueSet(cluster *Cluster, key string, value string) error {
+	tx, err := begin(cluster.db)
 	if err != nil {
 		return err
 	}
diff --git a/lxd/patches.go b/lxd/patches.go
index 79367d5a3..84a665aa4 100644
--- a/lxd/patches.go
+++ b/lxd/patches.go
@@ -2773,7 +2773,7 @@ func patchUpdateFromV15(d *Daemon) error {
 		return err
 	}
 
-	err = daemonConfigInit(d.db.DB())
+	err = daemonConfigInit(d.cluster)
 	if err != nil {
 		return err
 	}

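Reading cluster configuration now always goes through a ClusterTx; the recurring pattern (seen in api10Put, api10Patch and daemonConfigInit above) is:

	var values map[string]string
	err := d.cluster.Transaction(func(tx *db.ClusterTx) error {
		var err error
		values, err = tx.Config()
		return err
	})
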
From 1693a5a1e5501dc8854522b6a1467de2db36b3e5 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 17 Oct 2017 17:34:43 +0000
Subject: [PATCH 034/227] Add initial cluster.Config machinery

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/config.go      | 94 ++++++++++++++++++++++++++++++++++++++++++++++
 lxd/cluster/config_test.go | 94 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 188 insertions(+)
 create mode 100644 lxd/cluster/config.go
 create mode 100644 lxd/cluster/config_test.go

diff --git a/lxd/cluster/config.go b/lxd/cluster/config.go
new file mode 100644
index 000000000..f257c396a
--- /dev/null
+++ b/lxd/cluster/config.go
@@ -0,0 +1,94 @@
+package cluster
+
+import (
+	"fmt"
+
+	"github.com/lxc/lxd/lxd/config"
+	"github.com/lxc/lxd/lxd/db"
+)
+
+// Config holds cluster-wide configuration values.
+type Config struct {
+	tx *db.ClusterTx // DB transaction the values in this config are bound to.
+	m  config.Map    // Low-level map holding the config values.
+}
+
+// ConfigLoad loads a new Config object with the current cluster configuration
+// values fetched from the database.
+func ConfigLoad(tx *db.ClusterTx) (*Config, error) {
+	// Load current raw values from the database, any error is fatal.
+	values, err := tx.Config()
+	if err != nil {
+		return nil, fmt.Errorf("cannot fetch node config from database: %v", err)
+	}
+
+	m, err := config.SafeLoad(ConfigSchema, values)
+	if err != nil {
+		return nil, fmt.Errorf("failed to load node config: %v", err)
+	}
+
+	return &Config{tx: tx, m: m}, nil
+}
+
+// ProxyHTTP returns the configured HTTP proxy, if any.
+func (c *Config) ProxyHTTP() string {
+	return c.m.GetString("core.proxy_http")
+}
+
+// Dump current configuration keys and their values. Keys with values matching
+// their defaults are omitted.
+func (c *Config) Dump() map[string]interface{} {
+	return c.m.Dump()
+}
+
+// Replace the current configuration with the given values.
+func (c *Config) Replace(values map[string]interface{}) error {
+	return c.update(values)
+}
+
+// Patch changes only the configuration keys in the given map.
+func (c *Config) Patch(patch map[string]interface{}) error {
+	values := c.Dump() // Use current values as defaults
+	for name, value := range patch {
+		values[name] = value
+	}
+	return c.update(values)
+}
+
+func (c *Config) update(values map[string]interface{}) error {
+	changed, err := c.m.Change(values)
+	if err != nil {
+		return fmt.Errorf("invalid configuration changes: %s", err)
+	}
+
+	err = c.tx.UpdateConfig(changed)
+	if err != nil {
+		return fmt.Errorf("cannot persist confiuration changes: %v", err)
+	}
+
+	return nil
+}
+
+// ConfigSchema defines available server configuration keys.
+var ConfigSchema = config.Schema{
+	"core.https_allowed_headers":     {},
+	"core.https_allowed_methods":     {},
+	"core.https_allowed_origin":      {},
+	"core.https_allowed_credentials": {},
+	"core.proxy_http":                {},
+	"core.proxy_https":               {},
+	"core.proxy_ignore_hosts":        {},
+	"core.trust_password":            {},
+	"images.auto_update_cached":      {},
+	"images.auto_update_interval":    {},
+	"images.compression_algorithm":   {},
+	"images.remote_cache_expiry":     {},
+	"storage.lvm_fstype":             {},
+	"storage.lvm_mount_options":      {},
+	"storage.lvm_thinpool_name":      {},
+	"storage.lvm_vg_name":            {},
+	"storage.lvm_volume_size":        {},
+	"storage.zfs_pool_name":          {},
+	"storage.zfs_remove_snapshots":   {},
+	"storage.zfs_use_refquota":       {},
+}
diff --git a/lxd/cluster/config_test.go b/lxd/cluster/config_test.go
new file mode 100644
index 000000000..0da3979d8
--- /dev/null
+++ b/lxd/cluster/config_test.go
@@ -0,0 +1,94 @@
+package cluster_test
+
+import (
+	"testing"
+
+	"github.com/lxc/lxd/lxd/cluster"
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// The server configuration is initially empty.
+func TestConfigLoad_Initial(t *testing.T) {
+	tx, cleanup := db.NewTestClusterTx(t)
+	defer cleanup()
+
+	config, err := cluster.ConfigLoad(tx)
+
+	require.NoError(t, err)
+	assert.Equal(t, map[string]interface{}{}, config.Dump())
+}
+
+// If the database contains invalid keys, they are ignored.
+func TestConfigLoad_IgnoreInvalidKeys(t *testing.T) {
+	tx, cleanup := db.NewTestClusterTx(t)
+	defer cleanup()
+
+	err := tx.UpdateConfig(map[string]string{
+		"foo":             "garbage",
+		"core.proxy_http": "foo.bar",
+	})
+	require.NoError(t, err)
+
+	config, err := cluster.ConfigLoad(tx)
+
+	require.NoError(t, err)
+	values := map[string]interface{}{"core.proxy_http": "foo.bar"}
+	assert.Equal(t, values, config.Dump())
+}
+
+// With no triggers registered, loading the configuration behaves like a plain load.
+func TestConfigLoad_Triggers(t *testing.T) {
+	tx, cleanup := db.NewTestClusterTx(t)
+	defer cleanup()
+
+	config, err := cluster.ConfigLoad(tx)
+
+	require.NoError(t, err)
+	assert.Equal(t, map[string]interface{}{}, config.Dump())
+}
+
+// If some previously set values are missing from the ones passed to Replace(),
+// they are deleted from the configuration.
+func TestConfig_ReplaceDeleteValues(t *testing.T) {
+	tx, cleanup := db.NewTestClusterTx(t)
+	defer cleanup()
+
+	config, err := cluster.ConfigLoad(tx)
+	require.NoError(t, err)
+
+	err = config.Replace(map[string]interface{}{"core.proxy_http": "foo.bar"})
+	assert.NoError(t, err)
+
+	err = config.Replace(map[string]interface{}{})
+	assert.NoError(t, err)
+
+	assert.Equal(t, "", config.ProxyHTTP())
+
+	values, err := tx.Config()
+	require.NoError(t, err)
+	assert.Equal(t, map[string]string{}, values)
+}
+
+// If some previously set values are missing from the ones passed to Patch(),
+// they are kept as they are.
+func TestConfig_PatchKeepsValues(t *testing.T) {
+	tx, cleanup := db.NewTestClusterTx(t)
+	defer cleanup()
+
+	config, err := cluster.ConfigLoad(tx)
+	require.NoError(t, err)
+
+	err = config.Replace(map[string]interface{}{"core.proxy_http": "foo.bar"})
+	assert.NoError(t, err)
+
+	err = config.Patch(map[string]interface{}{})
+	assert.NoError(t, err)
+
+	assert.Equal(t, "foo.bar", config.ProxyHTTP())
+
+	values, err := tx.Config()
+	require.NoError(t, err)
+	assert.Equal(t, map[string]string{"core.proxy_http": "foo.bar"}, values)
+}

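A typical write through the new machinery combines Transaction, ConfigLoad and Patch; a sketch with an illustrative proxy value:

	err := d.cluster.Transaction(func(tx *db.ClusterTx) error {
		config, err := cluster.ConfigLoad(tx)
		if err != nil {
			return err
		}
		return config.Patch(map[string]interface{}{
			"core.proxy_http": "http://proxy.example.com:3128",
		})
	})

Since the Config object is bound to the transaction, the change commits or rolls back together with everything else done in the same ClusterTx.
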
From 4c5f42efb9fdaa05d47d07784b4013a3e5ac5358 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 17 Oct 2017 17:35:53 +0000
Subject: [PATCH 035/227] Drop legacy node.Config keys

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/config.go |  3 +++
 lxd/node/config.go    | 27 ---------------------------
 lxd/patches.go        | 12 +++++++++---
 3 files changed, 12 insertions(+), 30 deletions(-)

diff --git a/lxd/cluster/config.go b/lxd/cluster/config.go
index f257c396a..4cb56f91b 100644
--- a/lxd/cluster/config.go
+++ b/lxd/cluster/config.go
@@ -83,6 +83,9 @@ var ConfigSchema = config.Schema{
 	"images.auto_update_interval":    {},
 	"images.compression_algorithm":   {},
 	"images.remote_cache_expiry":     {},
+	"maas.api.key":                   {},
+	"maas.api.url":                   {},
+	"maas.machine":                   {},
 	"storage.lvm_fstype":             {},
 	"storage.lvm_mount_options":      {},
 	"storage.lvm_thinpool_name":      {},
diff --git a/lxd/node/config.go b/lxd/node/config.go
index 8e305d5b0..26f06004d 100644
--- a/lxd/node/config.go
+++ b/lxd/node/config.go
@@ -90,31 +90,4 @@ func (c *Config) update(values map[string]interface{}) error {
 var ConfigSchema = config.Schema{
 	// Network address for this LXD server.
 	"core.https_address": {},
-
-	// FIXME: Legacy node-level config values. Will be migrated to
-	//        cluster-config, but we need them here just to avoid
-	//        spurious errors in the logs
-	"core.https_allowed_headers":     {},
-	"core.https_allowed_methods":     {},
-	"core.https_allowed_origin":      {},
-	"core.https_allowed_credentials": {},
-	"core.proxy_http":                {},
-	"core.proxy_https":               {},
-	"core.proxy_ignore_hosts":        {},
-	"core.trust_password":            {},
-	"images.auto_update_cached":      {},
-	"images.auto_update_interval":    {},
-	"images.compression_algorithm":   {},
-	"images.remote_cache_expiry":     {},
-	"maas.api.key":                   {},
-	"maas.api.url":                   {},
-	"maas.machine":                   {},
-	"storage.lvm_fstype":             {},
-	"storage.lvm_mount_options":      {},
-	"storage.lvm_thinpool_name":      {},
-	"storage.lvm_vg_name":            {},
-	"storage.lvm_volume_size":        {},
-	"storage.zfs_pool_name":          {},
-	"storage.zfs_remove_snapshots":   {},
-	"storage.zfs_use_refquota":       {},
 }
diff --git a/lxd/patches.go b/lxd/patches.go
index 84a665aa4..bcdfa576d 100644
--- a/lxd/patches.go
+++ b/lxd/patches.go
@@ -2773,13 +2773,19 @@ func patchUpdateFromV15(d *Daemon) error {
 		return err
 	}
 
-	err = daemonConfigInit(d.cluster)
+	vgName := ""
+	err = d.db.Transaction(func(tx *db.NodeTx) error {
+		config, err := tx.Config()
+		if err != nil {
+			return err
+		}
+		vgName = config["storage.lvm_vg_name"]
+		return nil
+	})
 	if err != nil {
 		return err
 	}
 
-	vgName := daemonConfig["storage.lvm_vg_name"].Get()
-
 	for _, cName := range cNames {
 		var lvLinkPath string
 		if strings.Contains(cName, shared.SnapshotDelimiter) {

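With the legacy keys gone, node-level config only knows core.https_address; a hypothetical sketch of what now fails (the exact error text comes from the config package):

	err := nodeConfig.Replace(map[string]interface{}{
		"core.proxy_http": "http://example.com", // no longer a node-level key
	})
	// err is non-nil: this value must now be set through cluster.Config.
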
From 3aa95a2d00eb14a8094d5d1ba00326b5ecfbf161 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 17 Oct 2017 18:10:25 +0000
Subject: [PATCH 036/227] Load CORS headers settings from the database

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api.go            | 45 +++++++++++++++++++++++++++++++++------------
 lxd/cluster/config.go | 22 +++++++++++++++++++++-
 lxd/daemon_config.go  | 14 +++++---------
 3 files changed, 59 insertions(+), 22 deletions(-)

diff --git a/lxd/api.go b/lxd/api.go
index e038e76dc..ba6285ce9 100644
--- a/lxd/api.go
+++ b/lxd/api.go
@@ -2,10 +2,13 @@ package main
 
 import (
 	"net/http"
+	"strings"
 
 	log "github.com/lxc/lxd/shared/log15"
 
 	"github.com/gorilla/mux"
+	"github.com/lxc/lxd/lxd/cluster"
+	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/shared/logger"
 )
 
@@ -48,32 +51,50 @@ type lxdHttpServer struct {
 }
 
 func (s *lxdHttpServer) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
-	allowedOrigin := daemonConfig["core.https_allowed_origin"].Get()
+	// Set CORS headers, unless this is an internal or gRPC request.
+	if !strings.HasPrefix(req.URL.Path, "/internal") && !strings.HasPrefix(req.URL.Path, "/protocol.SQL") {
+		<-s.d.setupChan
+		err := s.d.cluster.Transaction(func(tx *db.ClusterTx) error {
+			config, err := cluster.ConfigLoad(tx)
+			if err != nil {
+				return err
+			}
+			setCORSHeaders(rw, req, config)
+			return nil
+		})
+		if err != nil {
+			http.Error(rw, err.Error(), http.StatusInternalServerError)
+		}
+	}
+
+	// OPTIONS requests don't need any further processing
+	if req.Method == "OPTIONS" {
+		return
+	}
+
+	// Call the original server
+	s.r.ServeHTTP(rw, req)
+}
+
+func setCORSHeaders(rw http.ResponseWriter, req *http.Request, config *cluster.Config) {
+	allowedOrigin := config.HTTPSAllowedOrigin()
 	origin := req.Header.Get("Origin")
 	if allowedOrigin != "" && origin != "" {
 		rw.Header().Set("Access-Control-Allow-Origin", allowedOrigin)
 	}
 
-	allowedMethods := daemonConfig["core.https_allowed_methods"].Get()
+	allowedMethods := config.HTTPSAllowedMethods()
 	if allowedMethods != "" && origin != "" {
 		rw.Header().Set("Access-Control-Allow-Methods", allowedMethods)
 	}
 
-	allowedHeaders := daemonConfig["core.https_allowed_headers"].Get()
+	allowedHeaders := config.HTTPSAllowedHeaders()
 	if allowedHeaders != "" && origin != "" {
 		rw.Header().Set("Access-Control-Allow-Headers", allowedHeaders)
 	}
 
-	allowedCredentials := daemonConfig["core.https_allowed_credentials"].GetBool()
+	allowedCredentials := config.HTTPSAllowedCredentials()
 	if allowedCredentials {
 		rw.Header().Set("Access-Control-Allow-Credentials", "true")
 	}
-
-	// OPTIONS request don't need any further processing
-	if req.Method == "OPTIONS" {
-		return
-	}
-
-	// Call the original server
-	s.r.ServeHTTP(rw, req)
 }
diff --git a/lxd/cluster/config.go b/lxd/cluster/config.go
index 4cb56f91b..17e26538c 100644
--- a/lxd/cluster/config.go
+++ b/lxd/cluster/config.go
@@ -30,6 +30,26 @@ func ConfigLoad(tx *db.ClusterTx) (*Config, error) {
 	return &Config{tx: tx, m: m}, nil
 }
 
+// HTTPSAllowedHeaders returns the relevant CORS setting.
+func (c *Config) HTTPSAllowedHeaders() string {
+	return c.m.GetString("core.https_allowed_headers")
+}
+
+// HTTPSAllowedMethods returns the relevant CORS setting.
+func (c *Config) HTTPSAllowedMethods() string {
+	return c.m.GetString("core.https_allowed_methods")
+}
+
+// HTTPSAllowedOrigin returns the relevant CORS setting.
+func (c *Config) HTTPSAllowedOrigin() string {
+	return c.m.GetString("core.https_allowed_origin")
+}
+
+// HTTPSAllowedCredentials returns the relevant CORS setting.
+func (c *Config) HTTPSAllowedCredentials() bool {
+	return c.m.GetBool("core.https_allowed_credentials")
+}
+
 // ProxyHTTP returns the configured HTTP proxy, if any.
 func (c *Config) ProxyHTTP() string {
 	return c.m.GetString("core.proxy_http")
@@ -74,7 +94,7 @@ var ConfigSchema = config.Schema{
 	"core.https_allowed_headers":     {},
 	"core.https_allowed_methods":     {},
 	"core.https_allowed_origin":      {},
-	"core.https_allowed_credentials": {},
+	"core.https_allowed_credentials": {Type: config.Bool},
 	"core.proxy_http":                {},
 	"core.proxy_https":               {},
 	"core.proxy_ignore_hosts":        {},
diff --git a/lxd/daemon_config.go b/lxd/daemon_config.go
index af109d887..e9a5924cc 100644
--- a/lxd/daemon_config.go
+++ b/lxd/daemon_config.go
@@ -181,15 +181,11 @@ func (k *daemonConfigKey) GetInt64() int64 {
 func daemonConfigInit(cluster *dbapi.Cluster) error {
 	// Set all the keys
 	daemonConfig = map[string]*daemonConfigKey{
-		"core.https_allowed_headers":     {valueType: "string"},
-		"core.https_allowed_methods":     {valueType: "string"},
-		"core.https_allowed_origin":      {valueType: "string"},
-		"core.https_allowed_credentials": {valueType: "bool"},
-		"core.proxy_http":                {valueType: "string", setter: daemonConfigSetProxy},
-		"core.proxy_https":               {valueType: "string", setter: daemonConfigSetProxy},
-		"core.proxy_ignore_hosts":        {valueType: "string", setter: daemonConfigSetProxy},
-		"core.trust_password":            {valueType: "string", hiddenValue: true, setter: daemonConfigSetPassword},
-		"core.macaroon.endpoint":         {valueType: "string", setter: daemonConfigSetMacaroonEndpoint},
+		"core.proxy_http":         {valueType: "string", setter: daemonConfigSetProxy},
+		"core.proxy_https":        {valueType: "string", setter: daemonConfigSetProxy},
+		"core.proxy_ignore_hosts": {valueType: "string", setter: daemonConfigSetProxy},
+		"core.trust_password":     {valueType: "string", hiddenValue: true, setter: daemonConfigSetPassword},
+		"core.macaroon.endpoint":  {valueType: "string", setter: daemonConfigSetMacaroonEndpoint},
 
 		"images.auto_update_cached":    {valueType: "bool", defaultValue: "true"},
 		"images.auto_update_interval":  {valueType: "int", defaultValue: "6", trigger: daemonConfigTriggerAutoUpdateInterval},

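Extracting setCORSHeaders makes the CORS logic unit-testable in isolation; a hypothetical check with net/http/httptest, assuming config is a *cluster.Config loaded from a test transaction with core.https_allowed_origin set:

	rw := httptest.NewRecorder()
	req := httptest.NewRequest("OPTIONS", "/1.0", nil)
	req.Header.Set("Origin", "https://ui.example.com")
	setCORSHeaders(rw, req, config)
	// rw.Header() now carries the configured Access-Control-Allow-* values.
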
From 69725886b32f35252c420679a9762b2cd41d0008 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 17 Oct 2017 19:53:36 +0000
Subject: [PATCH 037/227] Load deprecated storage config keys from database

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/config.go | 25 +++++++++++++-------
 lxd/daemon_config.go  | 18 ---------------
 lxd/patches.go        | 63 ++++++++++++++++++++++++++++++++++++++-------------
 3 files changed, 64 insertions(+), 42 deletions(-)

diff --git a/lxd/cluster/config.go b/lxd/cluster/config.go
index 17e26538c..7c2e84852 100644
--- a/lxd/cluster/config.go
+++ b/lxd/cluster/config.go
@@ -106,12 +106,21 @@ var ConfigSchema = config.Schema{
 	"maas.api.key":                   {},
 	"maas.api.url":                   {},
 	"maas.machine":                   {},
-	"storage.lvm_fstype":             {},
-	"storage.lvm_mount_options":      {},
-	"storage.lvm_thinpool_name":      {},
-	"storage.lvm_vg_name":            {},
-	"storage.lvm_volume_size":        {},
-	"storage.zfs_pool_name":          {},
-	"storage.zfs_remove_snapshots":   {},
-	"storage.zfs_use_refquota":       {},
+
+	// Keys deprecated since the implementation of the storage api.
+	"storage.lvm_fstype":           {Setter: deprecatedStorage, Default: "ext4"},
+	"storage.lvm_mount_options":    {Setter: deprecatedStorage, Default: "discard"},
+	"storage.lvm_thinpool_name":    {Setter: deprecatedStorage, Default: "LXDPool"},
+	"storage.lvm_vg_name":          {Setter: deprecatedStorage},
+	"storage.lvm_volume_size":      {Setter: deprecatedStorage, Default: "10GiB"},
+	"storage.zfs_pool_name":        {Setter: deprecatedStorage},
+	"storage.zfs_remove_snapshots": {Setter: deprecatedStorage, Type: config.Bool},
+	"storage.zfs_use_refquota":     {Setter: deprecatedStorage, Type: config.Bool},
+}
+
+func deprecatedStorage(value string) (string, error) {
+	if value == "" {
+		return "", nil
+	}
+	return "", fmt.Errorf("deprecated: use storage pool configuration")
 }
diff --git a/lxd/daemon_config.go b/lxd/daemon_config.go
index e9a5924cc..6bcfa3a3e 100644
--- a/lxd/daemon_config.go
+++ b/lxd/daemon_config.go
@@ -195,16 +195,6 @@ func daemonConfigInit(cluster *dbapi.Cluster) error {
 		"maas.api.key": {valueType: "string", setter: daemonConfigSetMAAS},
 		"maas.api.url": {valueType: "string", setter: daemonConfigSetMAAS},
 		"maas.machine": {valueType: "string", setter: daemonConfigSetMAAS},
-
-		// Keys deprecated since the implementation of the storage api.
-		"storage.lvm_fstype":           {valueType: "string", defaultValue: "ext4", validValues: []string{"btrfs", "ext4", "xfs"}, validator: storageDeprecatedKeys},
-		"storage.lvm_mount_options":    {valueType: "string", defaultValue: "discard", validator: storageDeprecatedKeys},
-		"storage.lvm_thinpool_name":    {valueType: "string", defaultValue: "LXDPool", validator: storageDeprecatedKeys},
-		"storage.lvm_vg_name":          {valueType: "string", validator: storageDeprecatedKeys},
-		"storage.lvm_volume_size":      {valueType: "string", defaultValue: "10GiB", validator: storageDeprecatedKeys},
-		"storage.zfs_pool_name":        {valueType: "string", validator: storageDeprecatedKeys},
-		"storage.zfs_remove_snapshots": {valueType: "bool", validator: storageDeprecatedKeys},
-		"storage.zfs_use_refquota":     {valueType: "bool", validator: storageDeprecatedKeys},
 	}
 
 	// Load the values from the DB
@@ -372,11 +362,3 @@ func daemonConfigValidateCompression(d *Daemon, key string, value string) error
 	_, err := exec.LookPath(value)
 	return err
 }
-
-func storageDeprecatedKeys(d *Daemon, key string, value string) error {
-	if value == "" || daemonConfig[key].defaultValue == value {
-		return nil
-	}
-
-	return fmt.Errorf("Setting the key \"%s\" is deprecated in favor of storage pool configuration.", key)
-}
diff --git a/lxd/patches.go b/lxd/patches.go
index bcdfa576d..a78588d40 100644
--- a/lxd/patches.go
+++ b/lxd/patches.go
@@ -8,6 +8,7 @@ import (
 	"strings"
 	"syscall"
 
+	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/logger"
@@ -163,8 +164,18 @@ func patchNetworkPermissions(name string, d *Daemon) error {
 }
 
 func patchStorageApi(name string, d *Daemon) error {
-	lvmVgName := daemonConfig["storage.lvm_vg_name"].Get()
-	zfsPoolName := daemonConfig["storage.zfs_pool_name"].Get()
+	var daemonConfig map[string]string
+	err := d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		var err error
+		daemonConfig, err = tx.Config()
+		return err
+	})
+	if err != nil {
+		return err
+	}
+
+	lvmVgName := daemonConfig["storage.lvm_vg_name"]
+	zfsPoolName := daemonConfig["storage.zfs_pool_name"]
 	defaultPoolName := "default"
 	preStorageApiStorageType := storageTypeDir
 
@@ -268,14 +279,25 @@ func patchStorageApi(name string, d *Daemon) error {
 	}
 
 	// Unset deprecated storage keys.
-	daemonConfig["storage.lvm_fstype"].Set(d, "")
-	daemonConfig["storage.lvm_mount_options"].Set(d, "")
-	daemonConfig["storage.lvm_thinpool_name"].Set(d, "")
-	daemonConfig["storage.lvm_vg_name"].Set(d, "")
-	daemonConfig["storage.lvm_volume_size"].Set(d, "")
-	daemonConfig["storage.zfs_pool_name"].Set(d, "")
-	daemonConfig["storage.zfs_remove_snapshots"].Set(d, "")
-	daemonConfig["storage.zfs_use_refquota"].Set(d, "")
+	err = d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		config, err := cluster.ConfigLoad(tx)
+		if err != nil {
+			return err
+		}
+		return config.Patch(map[string]interface{}{
+			"storage.lvm_fstype":           "",
+			"storage.lvm_mount_options":    "",
+			"storage.lvm_thinpool_name":    "",
+			"storage.lvm_vg_name":          "",
+			"storage.lvm_volume_size":      "",
+			"storage.zfs_pool_name":        "",
+			"storage.zfs_remove_snapshots": "",
+			"storage.zfs_use_refquota":     "",
+		})
+	})
+	if err != nil {
+		return err
+	}
 
 	return SetupStorageDriver(d.State(), true)
 }
@@ -831,26 +853,35 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
 	poolConfig["source"] = defaultPoolName
 
 	// Set it only if it is not the default value.
-	fsType := daemonConfig["storage.lvm_fstype"].Get()
+	var daemonConfig map[string]string
+	err := d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		var err error
+		daemonConfig, err = tx.Config()
+		return err
+	})
+	if err != nil {
+		return err
+	}
+	fsType := daemonConfig["storage.lvm_fstype"]
 	if fsType != "" && fsType != "ext4" {
 		poolConfig["volume.block.filesystem"] = fsType
 	}
 
 	// Set it only if it is not the default value.
-	fsMntOpts := daemonConfig["storage.lvm_mount_options"].Get()
+	fsMntOpts := daemonConfig["storage.lvm_mount_options"]
 	if fsMntOpts != "" && fsMntOpts != "discard" {
 		poolConfig["volume.block.mount_options"] = fsMntOpts
 	}
 
-	poolConfig["lvm.thinpool_name"] = daemonConfig["storage.lvm_thinpool_name"].Get()
+	poolConfig["lvm.thinpool_name"] = daemonConfig["storage.lvm_thinpool_name"]
 	if poolConfig["lvm.thinpool_name"] == "" {
 		// If empty we need to set it to the old default.
 		poolConfig["lvm.thinpool_name"] = "LXDPool"
 	}
 
-	poolConfig["lvm.vg_name"] = daemonConfig["storage.lvm_vg_name"].Get()
+	poolConfig["lvm.vg_name"] = daemonConfig["storage.lvm_vg_name"]
 
-	poolConfig["volume.size"] = daemonConfig["storage.lvm_volume_size"].Get()
+	poolConfig["volume.size"] = daemonConfig["storage.lvm_volume_size"]
 	if poolConfig["volume.size"] != "" {
 		// In case stuff like GiB is used which
 		// shared.ParseByteSizeString() doesn't handle.
@@ -862,7 +893,7 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
 	// "volume.size", so unset it.
 	poolConfig["size"] = ""
 
-	err := storagePoolValidateConfig(defaultPoolName, defaultStorageTypeName, poolConfig, nil)
+	err = storagePoolValidateConfig(defaultPoolName, defaultStorageTypeName, poolConfig, nil)
 	if err != nil {
 		return err
 	}
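
The deprecatedStorage setter above makes these keys effectively read-only: any
non-empty value is rejected, while clearing a key (empty string) still
succeeds so existing databases can drop them. A minimal standalone sketch of
that behavior; the function body mirrors the patch, while the surrounding
package and main() are illustrative only:

    package main

    import "fmt"

    func deprecatedStorage(value string) (string, error) {
        if value == "" {
            return "", nil // unsetting a deprecated key is still allowed
        }
        return "", fmt.Errorf("deprecated: use storage pool configuration")
    }

    func main() {
        _, err := deprecatedStorage("ext4")
        fmt.Println(err) // deprecated: use storage pool configuration

        _, err = deprecatedStorage("")
        fmt.Println(err) // <nil>: clearing the key succeeds
    }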

From 79552a3aa71902f13a33a25f2aa9d7b4fd6fd753 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 17 Oct 2017 20:03:50 +0000
Subject: [PATCH 038/227] Load daemon config from database when rendering REST
 responses

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/config.go | 11 ++++++-----
 lxd/daemon_config.go  | 27 +++++++++++++--------------
 2 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/lxd/cluster/config.go b/lxd/cluster/config.go
index 7c2e84852..16d8e60a3 100644
--- a/lxd/cluster/config.go
+++ b/lxd/cluster/config.go
@@ -98,11 +98,12 @@ var ConfigSchema = config.Schema{
 	"core.proxy_http":                {},
 	"core.proxy_https":               {},
 	"core.proxy_ignore_hosts":        {},
-	"core.trust_password":            {},
-	"images.auto_update_cached":      {},
-	"images.auto_update_interval":    {},
-	"images.compression_algorithm":   {},
-	"images.remote_cache_expiry":     {},
+	"core.trust_password":            {Hidden: true},
+	"core.macaroon.endpoint":         {},
+	"images.auto_update_cached":      {Type: config.Bool, Default: "true"},
+	"images.auto_update_interval":    {Type: config.Int64, Default: "6"},
+	"images.compression_algorithm":   {Default: "gzip"},
+	"images.remote_cache_expiry":     {Type: config.Int64, Default: "10"},
 	"maas.api.key":                   {},
 	"maas.api.url":                   {},
 	"maas.machine":                   {},
diff --git a/lxd/daemon_config.go b/lxd/daemon_config.go
index 6bcfa3a3e..d6f762e2f 100644
--- a/lxd/daemon_config.go
+++ b/lxd/daemon_config.go
@@ -13,6 +13,7 @@ import (
 	log "github.com/lxc/lxd/shared/log15"
 	"golang.org/x/crypto/scrypt"
 
+	"github.com/lxc/lxd/lxd/cluster"
 	dbapi "github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/node"
 	"github.com/lxc/lxd/lxd/state"
@@ -227,28 +228,26 @@ func daemonConfigRender(state *state.State) (map[string]interface{}, error) {
 	config := map[string]interface{}{}
 
 	// Turn the config into a JSON-compatible map
-	for k, v := range daemonConfig {
-		value := v.Get()
-		if value != v.defaultValue {
-			if v.hiddenValue {
-				config[k] = true
-			} else {
-				config[k] = value
-			}
+	err := state.Cluster.Transaction(func(tx *dbapi.ClusterTx) error {
+		clusterConfig, err := cluster.ConfigLoad(tx)
+		if err != nil {
+			return err
+		}
+		for key, value := range clusterConfig.Dump() {
+			config[key] = value
 		}
+		return nil
+	})
+	if err != nil {
+		return nil, err
 	}
 
-	err := state.Node.Transaction(func(tx *dbapi.NodeTx) error {
+	err = state.Node.Transaction(func(tx *dbapi.NodeTx) error {
 		nodeConfig, err := node.ConfigLoad(tx)
 		if err != nil {
 			return err
 		}
 		for key, value := range nodeConfig.Dump() {
-			// FIXME: we can drop this conditional as soon as we
-			//        migrate all non-node-local keys to the cluster db
-			if key != "core.https_address" {
-				continue
-			}
 			config[key] = value
 		}
 		return nil
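
With this change daemonConfigRender becomes a straight merge of the two
databases: cluster-wide keys first, node-local keys layered on top. A sketch
of that merge, assuming Dump() returns map[string]interface{} as in the
patch; mergeConfig is a hypothetical helper, not part of the patch:

    package example

    // mergeConfig mirrors the rendering logic above: copy the cluster-wide
    // keys first, then let node-local keys (e.g. core.https_address)
    // take precedence.
    func mergeConfig(clusterDump, nodeDump map[string]interface{}) map[string]interface{} {
        config := map[string]interface{}{}
        for key, value := range clusterDump {
            config[key] = value
        }
        for key, value := range nodeDump {
            config[key] = value
        }
        return config
    }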

From e6123b189318a3c645bb6bfa0a3d631396504740 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 18 Oct 2017 14:53:46 +0000
Subject: [PATCH 039/227] Retry failed cluster transactions

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/db.go          | 24 ++++++++++++++++++++----
 lxd/main_init_test.go |  2 ++
 2 files changed, 22 insertions(+), 4 deletions(-)

diff --git a/lxd/db/db.go b/lxd/db/db.go
index 356727be3..420257762 100644
--- a/lxd/db/db.go
+++ b/lxd/db/db.go
@@ -3,6 +3,7 @@ package db
 import (
 	"database/sql"
 	"fmt"
+	"strings"
 	"time"
 
 	grpcsql "github.com/CanonicalLtd/go-grpc-sql"
@@ -182,10 +183,25 @@ func OpenCluster(name string, dialer grpcsql.Dialer, address string) (*Cluster,
 // database, otherwise they are rolled back.
 func (c *Cluster) Transaction(f func(*ClusterTx) error) error {
 	clusterTx := &ClusterTx{}
-	return query.Transaction(c.db, func(tx *sql.Tx) error {
-		clusterTx.tx = tx
-		return f(clusterTx)
-	})
+
+	// FIXME: the retry loop should be configurable.
+	var err error
+	for i := 0; i < 10; i++ {
+		err = query.Transaction(c.db, func(tx *sql.Tx) error {
+			clusterTx.tx = tx
+			return f(clusterTx)
+		})
+		if err != nil {
+			// FIXME: we should bubble errors using errors.Wrap()
+			//        instead, and check for driver.ErrBadConn.
+			if strings.Contains(err.Error(), "bad connection") {
+				time.Sleep(time.Second)
+				continue
+			}
+		}
+		break
+	}
+	return err
 }
 
 // Close the database facade.
diff --git a/lxd/main_init_test.go b/lxd/main_init_test.go
index 8b3a2ad9a..8ea05500f 100644
--- a/lxd/main_init_test.go
+++ b/lxd/main_init_test.go
@@ -14,6 +14,7 @@ import (
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
 	"github.com/lxc/lxd/shared/cmd"
+	"github.com/lxc/lxd/shared/logging"
 	"github.com/stretchr/testify/suite"
 )
 
@@ -27,6 +28,7 @@ type cmdInitTestSuite struct {
 }
 
 func (suite *cmdInitTestSuite) SetupTest() {
+	logging.Testing(suite.T())
 	suite.lxdTestSuite.SetupTest()
 	suite.streams = cmd.NewMemoryStreams("")
 	suite.context = cmd.NewMemoryContext(suite.streams)
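
The loop above is a bounded retry that only re-runs the transaction when the
error looks like a dropped gRPC connection; success or any other error
returns immediately. The same pattern stripped of the transaction machinery,
as a sketch (withRetry is a hypothetical helper):

    package retry

    import (
        "strings"
        "time"
    )

    // withRetry runs f up to attempts times, retrying only when the error
    // message suggests a dropped connection; any other outcome (including
    // success) is returned as-is.
    func withRetry(attempts int, f func() error) error {
        var err error
        for i := 0; i < attempts; i++ {
            err = f()
            if err != nil && strings.Contains(err.Error(), "bad connection") {
                time.Sleep(time.Second)
                continue
            }
            break
        }
        return err
    }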

From a8601fae29be72f3998c10b7ba393d65a2279035 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 19 Oct 2017 11:10:47 +0000
Subject: [PATCH 040/227] Add helpers to update the heartbeat column of a node's
 row in the db

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/node.go      | 46 +++++++++++++++++++++++++++++++++++++++-------
 lxd/db/node_test.go | 21 +++++++++++++++++++++
 2 files changed, 60 insertions(+), 7 deletions(-)

diff --git a/lxd/db/node.go b/lxd/db/node.go
index ca02779c1..96fd70bf7 100644
--- a/lxd/db/node.go
+++ b/lxd/db/node.go
@@ -1,6 +1,9 @@
 package db
 
 import (
+	"fmt"
+	"time"
+
 	"github.com/lxc/lxd/lxd/db/cluster"
 	"github.com/lxc/lxd/lxd/db/query"
 	"github.com/lxc/lxd/shared/version"
@@ -9,12 +12,19 @@ import (
 
 // NodeInfo holds information about a single LXD instance in a cluster.
 type NodeInfo struct {
-	ID            int64  // Stable node identifier
-	Name          string // User-assigned name of the node
-	Address       string // Network address of the node
-	Description   string // Node description (optional)
-	Schema        int    // Schema version of the LXD code running the node
-	APIExtensions int    // Number of API extensions of the LXD code running on the node
+	ID            int64     // Stable node identifier
+	Name          string    // User-assigned name of the node
+	Address       string    // Network address of the node
+	Description   string    // Node description (optional)
+	Schema        int       // Schema version of the LXD code running the node
+	APIExtensions int       // Number of API extensions of the LXD code running on the node
+	Heartbeat     time.Time // Timestamp of the last heartbeat
+}
+
+// IsDown returns true if the last heartbeat time of the node is older than 20
+// seconds.
+func (n NodeInfo) IsDown() bool {
+	return n.Heartbeat.Before(time.Now().Add(-20 * time.Second))
 }
 
 // Nodes returns all LXD nodes part of the cluster.
@@ -31,9 +41,14 @@ func (c *ClusterTx) Nodes() ([]NodeInfo, error) {
 			&nodes[i].Description,
 			&nodes[i].Schema,
 			&nodes[i].APIExtensions,
+			&nodes[i].Heartbeat,
 		}
 	}
-	stmt := "SELECT id, name, address, description, schema, api_extensions FROM nodes ORDER BY id"
+	stmt := `
+SELECT id, name, address, description, schema, api_extensions, heartbeat
+  FROM nodes
+    ORDER BY id
+`
 	err := query.SelectObjects(c.tx, dest, stmt)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to fetch nodes")
@@ -48,3 +63,20 @@ func (c *ClusterTx) NodeAdd(name string, address string) (int64, error) {
 	values := []interface{}{name, address, cluster.SchemaVersion, len(version.APIExtensions)}
 	return query.UpsertObject(c.tx, "nodes", columns, values)
 }
+
+// NodeHeartbeat updates the heartbeat column of the node with the given address.
+func (c *ClusterTx) NodeHeartbeat(address string, heartbeat time.Time) error {
+	stmt := "UPDATE nodes SET heartbeat=? WHERE address=?"
+	result, err := c.tx.Exec(stmt, heartbeat, address)
+	if err != nil {
+		return err
+	}
+	n, err := result.RowsAffected()
+	if err != nil {
+		return err
+	}
+	if n != 1 {
+		return fmt.Errorf("expected to update one row and not %d", n)
+	}
+	return nil
+}
diff --git a/lxd/db/node_test.go b/lxd/db/node_test.go
index 82d3af111..2dbdf0efc 100644
--- a/lxd/db/node_test.go
+++ b/lxd/db/node_test.go
@@ -2,6 +2,7 @@ package db_test
 
 import (
 	"testing"
+	"time"
 
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/db/cluster"
@@ -28,4 +29,24 @@ func TestNodeAdd(t *testing.T) {
 	assert.Equal(t, "1.2.3.4:666", node.Address)
 	assert.Equal(t, cluster.SchemaVersion, node.Schema)
 	assert.Equal(t, len(version.APIExtensions), node.APIExtensions)
+	assert.False(t, node.IsDown())
+}
+
+// Update the heartbeat of a node.
+func TestNodeHeartbeat(t *testing.T) {
+	tx, cleanup := db.NewTestClusterTx(t)
+	defer cleanup()
+
+	_, err := tx.NodeAdd("buzz", "1.2.3.4:666")
+	require.NoError(t, err)
+
+	err = tx.NodeHeartbeat("1.2.3.4:666", time.Now().Add(-time.Minute))
+	require.NoError(t, err)
+
+	nodes, err := tx.Nodes()
+	require.NoError(t, err)
+	require.Len(t, nodes, 1)
+
+	node := nodes[0]
+	assert.True(t, node.IsDown())
 }
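
The liveness rule here is deliberately simple: a node counts as down once its
heartbeat is more than 20 seconds old, so heartbeats must be refreshed faster
than that. The check in isolation (isDown is a hypothetical standalone
version of NodeInfo.IsDown):

    package heartbeat

    import "time"

    // isDown reports whether the last heartbeat is more than 20 seconds in
    // the past, matching the constant used in the patch above.
    func isDown(heartbeat time.Time) bool {
        return heartbeat.Before(time.Now().Add(-20 * time.Second))
    }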

From b2037f14003af41be1a8a456df13721e9696439a Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 19 Oct 2017 11:11:58 +0000
Subject: [PATCH 041/227] Add cluster.Notifier to run client interactions
 against peer nodes

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/notify.go      | 128 ++++++++++++++++++++++++++++++++
 lxd/cluster/notify_test.go | 180 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 308 insertions(+)
 create mode 100644 lxd/cluster/notify.go
 create mode 100644 lxd/cluster/notify_test.go

diff --git a/lxd/cluster/notify.go b/lxd/cluster/notify.go
new file mode 100644
index 000000000..860692111
--- /dev/null
+++ b/lxd/cluster/notify.go
@@ -0,0 +1,128 @@
+package cluster
+
+import (
+	"fmt"
+	"strings"
+	"sync"
+
+	lxd "github.com/lxc/lxd/client"
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/node"
+	"github.com/lxc/lxd/lxd/state"
+	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/logger"
+	"github.com/pkg/errors"
+)
+
+// Notifier is a function that invokes the given function against each node in
+// the cluster excluding the invoking one.
+type Notifier func(hook func(lxd.ContainerServer) error) error
+
+// NotifierPolicy can be used to tweak the behavior of NewNotifier in case
+// some nodes are down.
+type NotifierPolicy int
+
+// Possible notification policies.
+const (
+	NotifyAll   NotifierPolicy = iota // Requires that all nodes are up.
+	NotifyAlive                       // Only notifies nodes that are alive
+)
+
+// NewNotifier builds a Notifier that can be used to notify other peers using
+// the given policy.
+func NewNotifier(state *state.State, cert *shared.CertInfo, policy NotifierPolicy) (Notifier, error) {
+	address, err := node.HTTPSAddress(state.Node)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to fetch node address")
+	}
+
+	// Fast-track the case where we're not networked at all.
+	if address == "" {
+		nullNotifier := func(func(lxd.ContainerServer) error) error { return nil }
+		return nullNotifier, nil
+	}
+
+	peers := []string{}
+	err = state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		nodes, err := tx.Nodes()
+		if err != nil {
+			return err
+		}
+		for _, node := range nodes {
+			if node.Address == address {
+				continue // Exclude ourselves
+			}
+			if node.IsDown() {
+				switch policy {
+				case NotifyAll:
+					return fmt.Errorf("peer node %s is down", node.Address)
+				case NotifyAlive:
+					continue // Just skip this node
+				}
+			}
+			peers = append(peers, node.Address)
+		}
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// Client parameters to connect to a peer cluster node.
+	args := &lxd.ConnectionArgs{
+		TLSServerCert: string(cert.PublicKey()),
+		TLSClientCert: string(cert.PublicKey()),
+		TLSClientKey:  string(cert.PrivateKey()),
+		// Use a special user agent to let the API handlers know they
+		// should not do any database work.
+		UserAgent: "lxd-cluster-notifier",
+	}
+	if cert.CA() != nil {
+		args.TLSCA = string(cert.CA().Raw)
+	}
+
+	notifier := func(hook func(lxd.ContainerServer) error) error {
+		errs := make([]error, len(peers))
+		wg := sync.WaitGroup{}
+		wg.Add(len(peers))
+		for i, address := range peers {
+			logger.Debugf("Notify node %s of state changes", address)
+			go func(i int, address string) {
+				defer wg.Done()
+				client, err := lxd.ConnectLXD(fmt.Sprintf("https://%s", address), args)
+				if err != nil {
+					errs[i] = errors.Wrapf(err, "failed to connect to peer %s", address)
+					return
+				}
+				err = hook(client)
+				if err != nil {
+					errs[i] = errors.Wrapf(err, "failed to notify peer %s", address)
+				}
+			}(i, address)
+		}
+		wg.Wait()
+		// TODO: aggregate all errors?
+		for i, err := range errs {
+			if err != nil {
+				// FIXME: unfortunately the LXD client currently does not
+				//        provide a way to differentiate between errors
+				if isClientConnectionError(err) && policy == NotifyAlive {
+					logger.Warnf("Could not notify node %s", peers[i])
+					continue
+				}
+				return err
+			}
+		}
+		return nil
+	}
+
+	return notifier, nil
+}
+
+// Return true if the given error is due to the LXD Go client not being able to
+// connect to the target LXD node.
+func isClientConnectionError(err error) bool {
+	// FIXME: unfortunately the LXD client currently does not
+	//        provide a way to differentiate between errors
+	return strings.Contains(err.Error(), "Unable to connect to")
+}
diff --git a/lxd/cluster/notify_test.go b/lxd/cluster/notify_test.go
new file mode 100644
index 000000000..409d04d8b
--- /dev/null
+++ b/lxd/cluster/notify_test.go
@@ -0,0 +1,180 @@
+package cluster_test
+
+import (
+	"net/http"
+	"net/http/httptest"
+	"strconv"
+	"testing"
+	"time"
+
+	lxd "github.com/lxc/lxd/client"
+	"github.com/lxc/lxd/lxd/cluster"
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/node"
+	"github.com/lxc/lxd/lxd/state"
+	"github.com/lxc/lxd/lxd/util"
+	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/api"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// The returned notifier connects to all nodes.
+func TestNewNotifier(t *testing.T) {
+	state, cleanup := state.NewTestState(t)
+	defer cleanup()
+
+	cert := shared.TestingKeyPair()
+
+	f := notifyFixtures{t: t, state: state}
+	defer f.Nodes(cert, 3)()
+
+	notifier, err := cluster.NewNotifier(state, cert, cluster.NotifyAll)
+	require.NoError(t, err)
+
+	i := 0
+	hook := func(client lxd.ContainerServer) error {
+		server, _, err := client.GetServer()
+		require.NoError(t, err)
+		assert.Equal(t, f.Address(i+1), server.Config["core.https_address"])
+		i++
+		return nil
+	}
+	assert.NoError(t, notifier(hook))
+	assert.Equal(t, 2, i)
+}
+
+// Creating a new notifier fails if the policy is set to NotifyAll and one of
+// the nodes is down.
+func TestNewNotify_NotifyAllError(t *testing.T) {
+	state, cleanup := state.NewTestState(t)
+	defer cleanup()
+
+	cert := shared.TestingKeyPair()
+
+	f := notifyFixtures{t: t, state: state}
+	defer f.Nodes(cert, 3)()
+
+	f.Down(1)
+	notifier, err := cluster.NewNotifier(state, cert, cluster.NotifyAll)
+	assert.Nil(t, notifier)
+	require.Error(t, err)
+	assert.Regexp(t, "peer node .+ is down", err.Error())
+}
+
+// Creating a new notifier does not fail if the policy is set to NotifyAlive
+// and one of the nodes is down, however dead nodes are ignored.
+func TestNewNotify_NotifyAlive(t *testing.T) {
+	state, cleanup := state.NewTestState(t)
+	defer cleanup()
+
+	cert := shared.TestingKeyPair()
+
+	f := notifyFixtures{t: t, state: state}
+	defer f.Nodes(cert, 3)()
+
+	f.Down(1)
+	notifier, err := cluster.NewNotifier(state, cert, cluster.NotifyAlive)
+	assert.NoError(t, err)
+
+	i := 0
+	hook := func(client lxd.ContainerServer) error {
+		i++
+		return nil
+	}
+	assert.NoError(t, notifier(hook))
+	assert.Equal(t, 1, i)
+}
+
+// Helper for setting fixtures for Notify tests.
+type notifyFixtures struct {
+	t     *testing.T
+	state *state.State
+}
+
+// Spawn the given number of fake nodes, save them in the database and
+// return a cleanup function.
+//
+// The address of the first node spawned will be saved as local
+// core.https_address.
+func (h *notifyFixtures) Nodes(cert *shared.CertInfo, n int) func() {
+	servers := make([]*httptest.Server, n)
+	for i := 0; i < n; i++ {
+		servers[i] = newRestServer(cert)
+	}
+
+	// Insert new entries in the nodes table of the cluster database.
+	err := h.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		for i := 0; i < n; i++ {
+			name := strconv.Itoa(i)
+			address := servers[i].Listener.Addr().String()
+			_, err := tx.NodeAdd(name, address)
+			require.NoError(h.t, err)
+		}
+		return nil
+	})
+	require.NoError(h.t, err)
+
+	// Set the address in the config table of the node database.
+	err = h.state.Node.Transaction(func(tx *db.NodeTx) error {
+		config, err := node.ConfigLoad(tx)
+		require.NoError(h.t, err)
+		address := servers[0].Listener.Addr().String()
+		values := map[string]interface{}{"core.https_address": address}
+		require.NoError(h.t, config.Patch(values))
+		return nil
+	})
+	require.NoError(h.t, err)
+
+	cleanup := func() {
+		for _, server := range servers {
+			server.Close()
+		}
+	}
+
+	return cleanup
+}
+
+// Return the network address of the i-th node.
+func (h *notifyFixtures) Address(i int) string {
+	var address string
+	err := h.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		nodes, err := tx.Nodes()
+		require.NoError(h.t, err)
+		address = nodes[i].Address
+		return nil
+	})
+	require.NoError(h.t, err)
+	return address
+}
+
+// Mark the i-th node as down.
+func (h *notifyFixtures) Down(i int) {
+	err := h.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		nodes, err := tx.Nodes()
+		require.NoError(h.t, err)
+		err = tx.NodeHeartbeat(nodes[i].Address, time.Now().Add(-time.Minute))
+		require.NoError(h.t, err)
+		return nil
+	})
+	require.NoError(h.t, err)
+}
+
+// Returns a minimal stub for the LXD RESTful API server, just realistic
+// enough to make lxd.ConnectLXD succeed.
+func newRestServer(cert *shared.CertInfo) *httptest.Server {
+	mux := http.NewServeMux()
+
+	server := httptest.NewUnstartedServer(mux)
+	server.TLS = util.ServerTLSConfig(cert)
+	server.StartTLS()
+
+	mux.HandleFunc("/1.0/", func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Content-Type", "application/json")
+		config := map[string]interface{}{"core.https_address": server.Listener.Addr().String()}
+		metadata := api.ServerPut{Config: config}
+		util.WriteJSON(w, api.ResponseRaw{Metadata: metadata}, false)
+	})
+
+	return server
+}
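
As the tests show, the returned closure fans the hook out to all peers in
parallel and reports the first meaningful error. A hedged usage sketch
(notifyPeers is a hypothetical caller; NewNotifier's signature is as in the
patch):

    package example

    import (
        lxd "github.com/lxc/lxd/client"
        "github.com/lxc/lxd/lxd/cluster"
        "github.com/lxc/lxd/lxd/state"
        "github.com/lxc/lxd/shared"
    )

    // notifyPeers builds a notifier that skips dead nodes and runs one API
    // call against every live peer.
    func notifyPeers(state *state.State, cert *shared.CertInfo) error {
        notifier, err := cluster.NewNotifier(state, cert, cluster.NotifyAlive)
        if err != nil {
            return err
        }
        return notifier(func(client lxd.ContainerServer) error {
            _, _, err := client.GetServer() // any per-node call goes here
            return err
        })
    }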

From 59abfdb31160b4f5300ac5c462b20c6a409d7df8 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 19 Oct 2017 13:39:04 +0000
Subject: [PATCH 042/227] Make /1.0 PUT/PATCH API handlers update the cluster
 database

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_1.0.go             | 116 ++++++++++++++++-----------------------
 lxd/cluster/config.go      |  71 +++++++++++++++++++++---
 lxd/cluster/config_test.go |   9 +--
 lxd/daemon_config.go       | 133 +++------------------------------------------
 lxd/daemon_test.go         |  43 ---------------
 lxd/main_init_test.go      |  93 +++++++++++++++++++++++++------
 lxd/node/config.go         |   2 +-
 lxd/patches.go             |   3 +-
 8 files changed, 201 insertions(+), 269 deletions(-)
 delete mode 100644 lxd/daemon_test.go

diff --git a/lxd/api_1.0.go b/lxd/api_1.0.go
index 84911cc06..bf56e82a1 100644
--- a/lxd/api_1.0.go
+++ b/lxd/api_1.0.go
@@ -1,13 +1,12 @@
 package main
 
 import (
-	"fmt"
 	"net/http"
 	"os"
-	"reflect"
 
 	"gopkg.in/lxc/go-lxc.v2"
 
+	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/config"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/node"
@@ -157,16 +156,6 @@ func api10Get(d *Daemon, r *http.Request) Response {
 }
 
 func api10Put(d *Daemon, r *http.Request) Response {
-	var oldConfig map[string]string
-	err := d.cluster.Transaction(func(tx *db.ClusterTx) error {
-		var err error
-		oldConfig, err = tx.Config()
-		return err
-	})
-	if err != nil {
-		return SmartError(err)
-	}
-
 	render, err := daemonConfigRender(d.State())
 	if err != nil {
 		return InternalError(err)
@@ -181,20 +170,10 @@ func api10Put(d *Daemon, r *http.Request) Response {
 		return BadRequest(err)
 	}
 
-	return doApi10Update(d, oldConfig, req)
+	return doApi10Update(d, req, false)
 }
 
 func api10Patch(d *Daemon, r *http.Request) Response {
-	var oldConfig map[string]string
-	err := d.cluster.Transaction(func(tx *db.ClusterTx) error {
-		var err error
-		oldConfig, err = tx.Config()
-		return err
-	})
-	if err != nil {
-		return SmartError(err)
-	}
-
 	render, err := daemonConfigRender(d.State())
 	if err != nil {
 		return InternalError(err)
@@ -213,17 +192,10 @@ func api10Patch(d *Daemon, r *http.Request) Response {
 		return EmptySyncResponse
 	}
 
-	for k, v := range oldConfig {
-		_, ok := req.Config[k]
-		if !ok {
-			req.Config[k] = v
-		}
-	}
-
-	return doApi10Update(d, oldConfig, req)
+	return doApi10Update(d, req, true)
 }
 
-func doApi10Update(d *Daemon, oldConfig map[string]string, req api.ServerPut) Response {
+func doApi10Update(d *Daemon, req api.ServerPut, patch bool) Response {
 	// The HTTPS address is the only config key that we want to save in the
 	// node-level database, so handle it here.
 	nodeValues := map[string]interface{}{}
@@ -247,51 +219,55 @@ func doApi10Update(d *Daemon, oldConfig map[string]string, req api.ServerPut) Re
 		return err
 	})
 	if err != nil {
-		return InternalError(err)
-	}
-
-	// Deal with special keys
-	for k, v := range req.Config {
-		config := daemonConfig[k]
-		if config != nil && config.hiddenValue && v == true {
-			req.Config[k] = oldConfig[k]
-		}
-	}
-
-	// Diff the configs
-	changedConfig := map[string]interface{}{}
-	for key, value := range oldConfig {
-		if req.Config[key] != value {
-			changedConfig[key] = req.Config[key]
-		}
-	}
-
-	for key, value := range req.Config {
-		if oldConfig[key] != value {
-			changedConfig[key] = req.Config[key]
+		switch err.(type) {
+		case config.ErrorList:
+			return BadRequest(err)
+		default:
+			return SmartError(err)
 		}
 	}
 
-	for key, valueRaw := range changedConfig {
-		if valueRaw == nil {
-			valueRaw = ""
+	var changed map[string]string
+	err = d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		config, err := cluster.ConfigLoad(tx)
+		if err != nil {
+			return errors.Wrap(err, "failed to load cluster config")
 		}
-
-		s := reflect.ValueOf(valueRaw)
-		if !s.IsValid() || s.Kind() != reflect.String {
-			return BadRequest(fmt.Errorf("Invalid value type for '%s'", key))
+		if patch {
+			changed, err = config.Patch(req.Config)
+		} else {
+			changed, err = config.Replace(req.Config)
 		}
-
-		value := valueRaw.(string)
-
-		confKey, ok := daemonConfig[key]
-		if !ok {
-			return BadRequest(fmt.Errorf("Bad server config key: '%s'", key))
+		return err
+	})
+	if err != nil {
+		switch err.(type) {
+		case config.ErrorList:
+			return BadRequest(err)
+		default:
+			return SmartError(err)
 		}
+	}
 
-		err := confKey.Set(d, value)
-		if err != nil {
-			return SmartError(err)
+	daemonConfigInit(d.cluster)
+
+	for key, value := range changed {
+		switch key {
+		case "core.proxy_http":
+			fallthrough
+		case "core.proxy_https":
+			fallthrough
+		case "core.proxy_ignore_hosts":
+			daemonConfigSetProxy(d, changed)
+		case "core.macaroon.endpoint":
+			err := d.setupExternalAuthentication(value)
+			if err != nil {
+				return SmartError(err)
+			}
+		case "images.auto_update_interval":
+			d.taskAutoUpdate.Reset()
+		case "images.remote_cache_expiry":
+			d.taskPruneImages.Reset()
 		}
 	}
 
diff --git a/lxd/cluster/config.go b/lxd/cluster/config.go
index 16d8e60a3..c23950f2d 100644
--- a/lxd/cluster/config.go
+++ b/lxd/cluster/config.go
@@ -1,7 +1,14 @@
 package cluster
 
 import (
+	"crypto/rand"
+	"encoding/hex"
 	"fmt"
+	"io"
+	"os/exec"
+	"time"
+
+	"golang.org/x/crypto/scrypt"
 
 	"github.com/lxc/lxd/lxd/config"
 	"github.com/lxc/lxd/lxd/db"
@@ -50,6 +57,17 @@ func (c *Config) HTTPSAllowedCredentials() bool {
 	return c.m.GetBool("core.https_allowed_credentials")
 }
 
+// TrustPassword returns the LXD trust password for authenticating clients.
+func (c *Config) TrustPassword() string {
+	return c.m.GetString("core.trust_password")
+}
+
+// AutoUpdateInterval returns the configured images auto update interval.
+func (c *Config) AutoUpdateInterval() time.Duration {
+	n := c.m.GetInt64("images.auto_update_interval")
+	return time.Duration(n) * time.Hour
+}
+
 // ProxyHTTP returns the configured HTTP proxy, if any.
 func (c *Config) ProxyHTTP() string {
 	return c.m.GetString("core.proxy_http")
@@ -62,12 +80,16 @@ func (c *Config) Dump() map[string]interface{} {
 }
 
 // Replace the current configuration with the given values.
-func (c *Config) Replace(values map[string]interface{}) error {
+//
+// Return what has actually changed.
+func (c *Config) Replace(values map[string]interface{}) (map[string]string, error) {
 	return c.update(values)
 }
 
 // Patch changes only the configuration keys in the given map.
-func (c *Config) Patch(patch map[string]interface{}) error {
+//
+// Return what has actually changed.
+func (c *Config) Patch(patch map[string]interface{}) (map[string]string, error) {
 	values := c.Dump() // Use current values as defaults
 	for name, value := range patch {
 		values[name] = value
@@ -75,18 +97,18 @@ func (c *Config) Patch(patch map[string]interface{}) error {
 	return c.update(values)
 }
 
-func (c *Config) update(values map[string]interface{}) error {
+func (c *Config) update(values map[string]interface{}) (map[string]string, error) {
 	changed, err := c.m.Change(values)
 	if err != nil {
-		return fmt.Errorf("invalid configuration changes: %s", err)
+		return nil, err
 	}
 
 	err = c.tx.UpdateConfig(changed)
 	if err != nil {
-		return fmt.Errorf("cannot persist confiuration changes: %v", err)
+		return nil, fmt.Errorf("cannot persist configuration changes: %v", err)
 	}
 
-	return nil
+	return changed, nil
 }
 
 // ConfigSchema defines available server configuration keys.
@@ -98,11 +120,11 @@ var ConfigSchema = config.Schema{
 	"core.proxy_http":                {},
 	"core.proxy_https":               {},
 	"core.proxy_ignore_hosts":        {},
-	"core.trust_password":            {Hidden: true},
+	"core.trust_password":            {Hidden: true, Setter: passwordSetter},
 	"core.macaroon.endpoint":         {},
 	"images.auto_update_cached":      {Type: config.Bool, Default: "true"},
 	"images.auto_update_interval":    {Type: config.Int64, Default: "6"},
-	"images.compression_algorithm":   {Default: "gzip"},
+	"images.compression_algorithm":   {Default: "gzip", Validator: validateCompression},
 	"images.remote_cache_expiry":     {Type: config.Int64, Default: "10"},
 	"maas.api.key":                   {},
 	"maas.api.url":                   {},
@@ -119,6 +141,39 @@ var ConfigSchema = config.Schema{
 	"storage.zfs_use_refquota":     {Setter: deprecatedStorage, Type: config.Bool},
 }
 
+func passwordSetter(value string) (string, error) {
+	// Nothing to do on unset
+	if value == "" {
+		return value, nil
+	}
+
+	// Hash the password
+	buf := make([]byte, 32)
+	_, err := io.ReadFull(rand.Reader, buf)
+	if err != nil {
+		return "", err
+	}
+
+	hash, err := scrypt.Key([]byte(value), buf, 1<<14, 8, 1, 64)
+	if err != nil {
+		return "", err
+	}
+
+	buf = append(buf, hash...)
+	value = hex.EncodeToString(buf)
+
+	return value, nil
+}
+
+func validateCompression(value string) error {
+	if value == "none" {
+		return nil
+	}
+
+	_, err := exec.LookPath(value)
+	return err
+}
+
 func deprecatedStorage(value string) (string, error) {
 	if value == "" {
 		return "", nil
diff --git a/lxd/cluster/config_test.go b/lxd/cluster/config_test.go
index 0da3979d8..ee67ac3d1 100644
--- a/lxd/cluster/config_test.go
+++ b/lxd/cluster/config_test.go
@@ -58,10 +58,11 @@ func TestConfig_ReplaceDeleteValues(t *testing.T) {
 	config, err := cluster.ConfigLoad(tx)
 	require.NoError(t, err)
 
-	err = config.Replace(map[string]interface{}{"core.proxy_http": "foo.bar"})
+	changed, err := config.Replace(map[string]interface{}{"core.proxy_http": "foo.bar"})
 	assert.NoError(t, err)
+	assert.Equal(t, map[string]string{"core.proxy_http": "foo.bar"}, changed)
 
-	err = config.Replace(map[string]interface{}{})
+	_, err = config.Replace(map[string]interface{}{})
 	assert.NoError(t, err)
 
 	assert.Equal(t, "", config.ProxyHTTP())
@@ -80,10 +81,10 @@ func TestConfig_PatchKeepsValues(t *testing.T) {
 	config, err := cluster.ConfigLoad(tx)
 	require.NoError(t, err)
 
-	err = config.Replace(map[string]interface{}{"core.proxy_http": "foo.bar"})
+	_, err = config.Replace(map[string]interface{}{"core.proxy_http": "foo.bar"})
 	assert.NoError(t, err)
 
-	err = config.Patch(map[string]interface{}{})
+	_, err = config.Patch(map[string]interface{}{})
 	assert.NoError(t, err)
 
 	assert.Equal(t, "foo.bar", config.ProxyHTTP())
diff --git a/lxd/daemon_config.go b/lxd/daemon_config.go
index d6f762e2f..d89b99e9a 100644
--- a/lxd/daemon_config.go
+++ b/lxd/daemon_config.go
@@ -1,17 +1,12 @@
 package main
 
 import (
-	"crypto/rand"
-	"encoding/hex"
 	"fmt"
-	"io"
-	"os/exec"
 	"strconv"
 	"strings"
 	"sync"
 
 	log "github.com/lxc/lxd/shared/log15"
-	"golang.org/x/crypto/scrypt"
 
 	"github.com/lxc/lxd/lxd/cluster"
 	dbapi "github.com/lxc/lxd/lxd/db"
@@ -96,53 +91,6 @@ func (k *daemonConfigKey) Validate(d *Daemon, value string) error {
 	return nil
 }
 
-func (k *daemonConfigKey) Set(d *Daemon, value string) error {
-	var name string
-
-	// Check if we are actually changing things
-	oldValue := k.currentValue
-	if oldValue == value {
-		return nil
-	}
-
-	// Validate the new value
-	err := k.Validate(d, value)
-	if err != nil {
-		return err
-	}
-
-	// Run external setting function
-	if k.setter != nil {
-		value, err = k.setter(d, k.name(), value)
-		if err != nil {
-			return err
-		}
-	}
-
-	// Get the configuration key and make sure daemonConfig is sane
-	name = k.name()
-	if name == "" {
-		return fmt.Errorf("Corrupted configuration cache")
-	}
-
-	// Actually apply the change
-	daemonConfigLock.Lock()
-	k.currentValue = value
-	daemonConfigLock.Unlock()
-
-	err = dbapi.ConfigValueSet(d.cluster, name, value)
-	if err != nil {
-		return err
-	}
-
-	// Run the trigger (if any)
-	if k.trigger != nil {
-		k.trigger(d, k.name(), value)
-	}
-
-	return nil
-}
-
 func (k *daemonConfigKey) Get() string {
 	value := k.currentValue
 
@@ -182,16 +130,16 @@ func (k *daemonConfigKey) GetInt64() int64 {
 func daemonConfigInit(cluster *dbapi.Cluster) error {
 	// Set all the keys
 	daemonConfig = map[string]*daemonConfigKey{
-		"core.proxy_http":         {valueType: "string", setter: daemonConfigSetProxy},
-		"core.proxy_https":        {valueType: "string", setter: daemonConfigSetProxy},
-		"core.proxy_ignore_hosts": {valueType: "string", setter: daemonConfigSetProxy},
-		"core.trust_password":     {valueType: "string", hiddenValue: true, setter: daemonConfigSetPassword},
-		"core.macaroon.endpoint":  {valueType: "string", setter: daemonConfigSetMacaroonEndpoint},
+		"core.proxy_http":         {valueType: "string"},
+		"core.proxy_https":        {valueType: "string"},
+		"core.proxy_ignore_hosts": {valueType: "string"},
+		"core.trust_password":     {valueType: "string", hiddenValue: true},
+		"core.macaroon.endpoint":  {valueType: "string"},
 
 		"images.auto_update_cached":    {valueType: "bool", defaultValue: "true"},
-		"images.auto_update_interval":  {valueType: "int", defaultValue: "6", trigger: daemonConfigTriggerAutoUpdateInterval},
-		"images.compression_algorithm": {valueType: "string", validator: daemonConfigValidateCompression, defaultValue: "gzip"},
-		"images.remote_cache_expiry":   {valueType: "int", defaultValue: "10", trigger: daemonConfigTriggerExpiry},
+		"images.auto_update_interval":  {valueType: "int", defaultValue: "6"},
+		"images.compression_algorithm": {valueType: "string", defaultValue: "gzip"},
+		"images.remote_cache_expiry":   {valueType: "int", defaultValue: "10"},
 
 		"maas.api.key": {valueType: "string", setter: daemonConfigSetMAAS},
 		"maas.api.url": {valueType: "string", setter: daemonConfigSetMAAS},
@@ -259,49 +207,7 @@ func daemonConfigRender(state *state.State) (map[string]interface{}, error) {
 	return config, nil
 }
 
-func daemonConfigSetPassword(d *Daemon, key string, value string) (string, error) {
-	// Nothing to do on unset
-	if value == "" {
-		return value, nil
-	}
-
-	// Hash the password
-	buf := make([]byte, 32)
-	_, err := io.ReadFull(rand.Reader, buf)
-	if err != nil {
-		return "", err
-	}
-
-	hash, err := scrypt.Key([]byte(value), buf, 1<<14, 8, 1, 64)
-	if err != nil {
-		return "", err
-	}
-
-	buf = append(buf, hash...)
-	value = hex.EncodeToString(buf)
-
-	return value, nil
-}
-
-func daemonConfigSetMacaroonEndpoint(d *Daemon, key string, value string) (string, error) {
-	err := d.setupExternalAuthentication(value)
-	if err != nil {
-		return "", err
-	}
-
-	return value, nil
-}
-
-func daemonConfigSetProxy(d *Daemon, key string, value string) (string, error) {
-	// Get the current config
-	config := map[string]string{}
-	config["core.proxy_https"] = daemonConfig["core.proxy_https"].Get()
-	config["core.proxy_http"] = daemonConfig["core.proxy_http"].Get()
-	config["core.proxy_ignore_hosts"] = daemonConfig["core.proxy_ignore_hosts"].Get()
-
-	// Apply the change
-	config[key] = value
-
+func daemonConfigSetProxy(d *Daemon, config map[string]string) {
 	// Update the cached proxy function
 	d.proxy = shared.ProxyFromConfig(
 		config["core.proxy_https"],
@@ -315,8 +221,6 @@ func daemonConfigSetProxy(d *Daemon, key string, value string) (string, error) {
 		delete(imageStreamCache, k)
 	}
 	imageStreamCacheLock.Unlock()
-
-	return value, nil
 }
 
 func daemonConfigSetMAAS(d *Daemon, key string, value string) (string, error) {
@@ -342,22 +246,3 @@ func daemonConfigSetMAAS(d *Daemon, key string, value string) (string, error) {
 
 	return value, nil
 }
-
-func daemonConfigTriggerExpiry(d *Daemon, key string, value string) {
-	// Trigger an image pruning run
-	d.taskPruneImages.Reset()
-}
-
-func daemonConfigTriggerAutoUpdateInterval(d *Daemon, key string, value string) {
-	// Reset the auto-update interval loop
-	d.taskAutoUpdate.Reset()
-}
-
-func daemonConfigValidateCompression(d *Daemon, key string, value string) error {
-	if value == "none" {
-		return nil
-	}
-
-	_, err := exec.LookPath(value)
-	return err
-}
diff --git a/lxd/daemon_test.go b/lxd/daemon_test.go
deleted file mode 100644
index 9ce47e92b..000000000
--- a/lxd/daemon_test.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package main
-
-import (
-	"testing"
-
-	"github.com/stretchr/testify/suite"
-)
-
-type daemonTestSuite struct {
-	lxdTestSuite
-}
-
-func (suite *daemonTestSuite) Test_config_value_set_empty_removes_val() {
-	var err error
-	d := suite.d
-
-	err = daemonConfig["core.trust_password"].Set(d, "foo")
-	suite.Req.Nil(err)
-
-	val := daemonConfig["core.trust_password"].Get()
-	suite.Req.Equal(len(val), 192)
-
-	valMap, err := daemonConfigRender(d.State())
-	suite.Req.NoError(err)
-	value, present := valMap["core.trust_password"]
-	suite.Req.True(present)
-	suite.Req.Equal(value, true)
-
-	err = daemonConfig["core.trust_password"].Set(d, "")
-	suite.Req.Nil(err)
-
-	val = daemonConfig["core.trust_password"].Get()
-	suite.Req.Equal(val, "")
-
-	valMap, err = daemonConfigRender(d.State())
-	suite.Req.NoError(err)
-	_, present = valMap["core.trust_password"]
-	suite.Req.False(present)
-}
-
-func TestDaemonTestSuite(t *testing.T) {
-	suite.Run(t, new(daemonTestSuite))
-}
diff --git a/lxd/main_init_test.go b/lxd/main_init_test.go
index 8ea05500f..94dce4347 100644
--- a/lxd/main_init_test.go
+++ b/lxd/main_init_test.go
@@ -6,8 +6,11 @@ import (
 	"path/filepath"
 	"strconv"
 	"testing"
+	"time"
 
 	"github.com/lxc/lxd/client"
+	"github.com/lxc/lxd/lxd/cluster"
+	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/node"
 	"github.com/lxc/lxd/lxd/util"
 
@@ -93,8 +96,13 @@ func (suite *cmdInitTestSuite) TestCmdInit_PreseedHTTPSAddressAndTrustPassword()
 	address, err := node.HTTPSAddress(suite.d.db)
 	suite.Req.NoError(err)
 	suite.Req.Equal(fmt.Sprintf("127.0.0.1:%d", port), address)
-	secret := daemonConfig["core.trust_password"].Get()
-	suite.Req.Nil(util.PasswordCheck(secret, "sekret"))
+	err = suite.d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		config, err := cluster.ConfigLoad(tx)
+		suite.Req.NoError(err)
+		suite.Req.Nil(util.PasswordCheck(config.TrustPassword(), "sekret"))
+		return nil
+	})
+	suite.Req.NoError(err)
 }
 
 // Input network address and trust password interactively.
@@ -116,8 +124,13 @@ func (suite *cmdInitTestSuite) TestCmdInit_InteractiveHTTPSAddressAndTrustPasswo
 	address, err := node.HTTPSAddress(suite.d.db)
 	suite.Req.NoError(err)
 	suite.Req.Equal(fmt.Sprintf("127.0.0.1:%d", port), address)
-	secret := daemonConfig["core.trust_password"].Get()
-	suite.Req.Nil(util.PasswordCheck(secret, "sekret"))
+	err = suite.d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		config, err := cluster.ConfigLoad(tx)
+		suite.Req.NoError(err)
+		suite.Req.Nil(util.PasswordCheck(config.TrustPassword(), "sekret"))
+		return nil
+	})
+	suite.Req.NoError(err)
 }
 
 // Enable clustering interactively.
@@ -155,8 +168,13 @@ func (suite *cmdInitTestSuite) TestCmdInit_AutoHTTPSAddressAndTrustPassword() {
 	address, err := node.HTTPSAddress(suite.d.db)
 	suite.Req.NoError(err)
 	suite.Req.Equal(fmt.Sprintf("127.0.0.1:%d", port), address)
-	secret := daemonConfig["core.trust_password"].Get()
-	suite.Req.Nil(util.PasswordCheck(secret, "sekret"))
+	err = suite.d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		config, err := cluster.ConfigLoad(tx)
+		suite.Req.NoError(err)
+		suite.Req.Nil(util.PasswordCheck(config.TrustPassword(), "sekret"))
+		return nil
+	})
+	suite.Req.NoError(err)
 }
 
 // The images auto-update interval can be interactively set by simply accepting
@@ -169,15 +187,25 @@ func (suite *cmdInitTestSuite) TestCmdInit_ImagesAutoUpdateAnswerYes() {
 
 	suite.Req.Nil(suite.command.Run())
 
-	key, _ := daemonConfig["images.auto_update_interval"]
-	suite.Req.Equal("6", key.Get())
+	err := suite.d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		config, err := cluster.ConfigLoad(tx)
+		suite.Req.NoError(err)
+		suite.Req.Equal(6*time.Hour, config.AutoUpdateInterval())
+		return nil
+	})
+	suite.Req.NoError(err)
 }
 
 // If the images auto-update interval value is already set to non-zero, it
 // won't be overwritten.
 func (suite *cmdInitTestSuite) TestCmdInit_ImagesAutoUpdateNoOverwrite() {
-	key, _ := daemonConfig["images.auto_update_interval"]
-	err := key.Set(suite.d, "10")
+	err := suite.d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		config, err := cluster.ConfigLoad(tx)
+		suite.Req.NoError(err)
+		_, err = config.Patch(map[string]interface{}{"images.auto_update_interval": "10"})
+		suite.Req.NoError(err)
+		return nil
+	})
 	suite.Req.Nil(err)
 
 	answers := &cmdInitAnswers{
@@ -187,7 +215,13 @@ func (suite *cmdInitTestSuite) TestCmdInit_ImagesAutoUpdateNoOverwrite() {
 
 	suite.Req.Nil(suite.command.Run())
 
-	suite.Req.Equal("10", key.Get())
+	err = suite.d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		config, err := cluster.ConfigLoad(tx)
+		suite.Req.NoError(err)
+		suite.Req.Equal(10*time.Hour, config.AutoUpdateInterval())
+		return nil
+	})
+	suite.Req.NoError(err)
 }
 
 // If an invalid backend type is passed with --storage-backend, an
@@ -243,15 +277,26 @@ func (suite *cmdInitTestSuite) TestCmdInit_ImagesAutoUpdateAnswerNo() {
 
 	suite.Req.Nil(suite.command.Run())
 
-	key, _ := daemonConfig["images.auto_update_interval"]
-	suite.Req.Equal("0", key.Get())
+	err := suite.d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		config, err := cluster.ConfigLoad(tx)
+		suite.Req.NoError(err)
+		suite.Req.Equal(time.Duration(0), config.AutoUpdateInterval())
+		return nil
+	})
+	suite.Req.NoError(err)
 }
 
 // If the user answers "no" to the images auto-update question, the value will
 // be set to 0, even if it was already set to some value.
 func (suite *cmdInitTestSuite) TestCmdInit_ImagesAutoUpdateOverwriteIfZero() {
-	key, _ := daemonConfig["images.auto_update_interval"]
-	key.Set(suite.d, "10")
+	err := suite.d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		config, err := cluster.ConfigLoad(tx)
+		suite.Req.NoError(err)
+		_, err = config.Patch(map[string]interface{}{"images.auto_update_interval": "10"})
+		suite.Req.NoError(err)
+		return nil
+	})
+	suite.Req.Nil(err)
 
 	answers := &cmdInitAnswers{
 		WantImageAutoUpdate: false,
@@ -259,7 +304,14 @@ func (suite *cmdInitTestSuite) TestCmdInit_ImagesAutoUpdateOverwriteIfZero() {
 	answers.Render(suite.streams)
 
 	suite.Req.Nil(suite.command.Run())
-	suite.Req.Equal("0", key.Get())
+
+	err = suite.d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		config, err := cluster.ConfigLoad(tx)
+		suite.Req.NoError(err)
+		suite.Req.Equal(time.Duration(0), config.AutoUpdateInterval())
+		return nil
+	})
+	suite.Req.NoError(err)
 }
 
 // Preseed the image auto-update interval.
@@ -270,8 +322,13 @@ func (suite *cmdInitTestSuite) TestCmdInit_ImagesAutoUpdatePreseed() {
 `)
 	suite.Req.Nil(suite.command.Run())
 
-	key, _ := daemonConfig["images.auto_update_interval"]
-	suite.Req.Equal("15", key.Get())
+	err := suite.d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		config, err := cluster.ConfigLoad(tx)
+		suite.Req.NoError(err)
+		suite.Req.Equal(15*time.Hour, config.AutoUpdateInterval())
+		return nil
+	})
+	suite.Req.NoError(err)
 }
 
 // If --storage-backend is set to "dir" a storage pool is created.
diff --git a/lxd/node/config.go b/lxd/node/config.go
index 26f06004d..68cb5dd9c 100644
--- a/lxd/node/config.go
+++ b/lxd/node/config.go
@@ -75,7 +75,7 @@ func HTTPSAddress(node *db.Node) (string, error) {
 func (c *Config) update(values map[string]interface{}) error {
 	changed, err := c.m.Change(values)
 	if err != nil {
-		return fmt.Errorf("invalid configuration changes: %s", err)
+		return err
 	}
 
 	err = c.tx.UpdateConfig(changed)
diff --git a/lxd/patches.go b/lxd/patches.go
index a78588d40..3ec89394c 100644
--- a/lxd/patches.go
+++ b/lxd/patches.go
@@ -284,7 +284,7 @@ func patchStorageApi(name string, d *Daemon) error {
 		if err != nil {
 			return err
 		}
-		return config.Patch(map[string]interface{}{
+		_, err = config.Patch(map[string]interface{}{
 			"storage.lvm_fstype":           "",
 			"storage.lvm_mount_options":    "",
 			"storage.lvm_thinpool_name":    "",
@@ -294,6 +294,7 @@ func patchStorageApi(name string, d *Daemon) error {
 			"storage.zfs_remove_snapshots": "",
 			"storage.zfs_use_refquota":     "",
 		})
+		return err
 	})
 	if err != nil {
 		return err
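
passwordSetter stores hex(salt || scrypt(password, salt)) with a 32-byte
random salt and a 64-byte key, i.e. 192 hex characters, which is exactly the
length the removed daemon test asserted. A sketch of the matching
verification under those assumptions (the real check is util.PasswordCheck;
passwordCheck here is hypothetical):

    package example

    import (
        "bytes"
        "encoding/hex"
        "fmt"

        "golang.org/x/crypto/scrypt"
    )

    // passwordCheck verifies a password against the stored value produced by
    // passwordSetter: a 32-byte salt followed by a 64-byte scrypt key,
    // derived with the same parameters (N=1<<14, r=8, p=1).
    func passwordCheck(stored, password string) error {
        buf, err := hex.DecodeString(stored)
        if err != nil {
            return err
        }
        if len(buf) != 96 { // 32-byte salt + 64-byte key
            return fmt.Errorf("invalid stored password")
        }
        salt, want := buf[:32], buf[32:]
        hash, err := scrypt.Key([]byte(password), salt, 1<<14, 8, 1, 64)
        if err != nil {
            return err
        }
        if !bytes.Equal(hash, want) {
            return fmt.Errorf("bad password")
        }
        return nil
    }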

From 130c13712de4afcae7e7d97d4ea2469aacbdc065 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 19 Oct 2017 15:10:56 +0000
Subject: [PATCH 043/227] Drop the daemonConfig cache and access the db
 directly instead

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_1.0.go         |  40 ++++++++--
 lxd/api_cluster.go     |   5 +-
 lxd/certificates.go    |   8 +-
 lxd/cluster/config.go  |  79 +++++++++++++++++++
 lxd/containers_post.go |   9 ++-
 lxd/daemon.go          |  50 ++++++------
 lxd/daemon_config.go   | 202 ++-----------------------------------------------
 lxd/daemon_images.go   |   6 +-
 lxd/images.go          |  45 ++++++++---
 lxd/main_init_test.go  |   5 +-
 10 files changed, 206 insertions(+), 243 deletions(-)

diff --git a/lxd/api_1.0.go b/lxd/api_1.0.go
index bf56e82a1..36e1fd62f 100644
--- a/lxd/api_1.0.go
+++ b/lxd/api_1.0.go
@@ -63,8 +63,18 @@ var api10 = []Command{
 
 func api10Get(d *Daemon, r *http.Request) Response {
 	authMethods := []string{"tls"}
-	if daemonConfig["core.macaroon.endpoint"].Get() != "" {
-		authMethods = append(authMethods, "macaroons")
+	err := d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		config, err := cluster.ConfigLoad(tx)
+		if err != nil {
+			return err
+		}
+		if config.MacaroonEndpoint() != "" {
+			authMethods = append(authMethods, "macaroons")
+		}
+		return nil
+	})
+	if err != nil {
+		return SmartError(err)
 	}
 	srv := api.ServerUntrusted{
 		APIExtensions: version.APIExtensions,
@@ -228,15 +238,17 @@ func doApi10Update(d *Daemon, req api.ServerPut, patch bool) Response {
 	}
 
 	var changed map[string]string
+	var newConfig *cluster.Config
 	err = d.cluster.Transaction(func(tx *db.ClusterTx) error {
-		config, err := cluster.ConfigLoad(tx)
+		var err error
+		newConfig, err = cluster.ConfigLoad(tx)
 		if err != nil {
 			return errors.Wrap(err, "failed to load cluster config")
 		}
 		if patch {
-			changed, err = config.Patch(req.Config)
+			changed, err = newConfig.Patch(req.Config)
 		} else {
-			changed, err = config.Replace(req.Config)
+			changed, err = newConfig.Replace(req.Config)
 		}
 		return err
 	})
@@ -249,8 +261,7 @@ func doApi10Update(d *Daemon, req api.ServerPut, patch bool) Response {
 		}
 	}
 
-	daemonConfigInit(d.cluster)
-
+	maasControllerChanged := false
 	for key, value := range changed {
 		switch key {
 		case "core.proxy_http":
@@ -258,7 +269,13 @@ func doApi10Update(d *Daemon, req api.ServerPut, patch bool) Response {
 		case "core.proxy_https":
 			fallthrough
 		case "core.proxy_ignore_hosts":
-			daemonConfigSetProxy(d, changed)
+			daemonConfigSetProxy(d, newConfig)
+		case "maas.api.url":
+			fallthrough
+		case "maas.api.key":
+			fallthrough
+		case "maas.machine":
+			maasControllerChanged = true
 		case "core.macaroon.endpoint":
 			err := d.setupExternalAuthentication(value)
 			if err != nil {
@@ -270,6 +287,13 @@ func doApi10Update(d *Daemon, req api.ServerPut, patch bool) Response {
 			d.taskPruneImages.Reset()
 		}
 	}
+	if maasControllerChanged {
+		url, key, machine := newConfig.MAASController()
+		err := d.setupMAASController(url, key, machine)
+		if err != nil {
+			return SmartError(err)
+		}
+	}
 
 	return EmptySyncResponse
 }
diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index 00ff4a0ff..c00d763c5 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -71,7 +71,10 @@ func clusterPostBootstrap(d *Daemon, req api.ClusterPost) Response {
 func clusterPostAccept(d *Daemon, req api.ClusterPost) Response {
 	// Accepting a node requires the client to provide the correct
 	// trust password.
-	secret := daemonConfig["core.trust_password"].Get()
+	secret, err := cluster.ConfigGetString(d.cluster, "core.trust_password")
+	if err != nil {
+		return SmartError(err)
+	}
 	if util.PasswordCheck(secret, req.TargetPassword) != nil {
 		return Forbidden
 	}
diff --git a/lxd/certificates.go b/lxd/certificates.go
index 97dba38a7..fd3c2ea6c 100644
--- a/lxd/certificates.go
+++ b/lxd/certificates.go
@@ -11,6 +11,7 @@ import (
 
 	"github.com/gorilla/mux"
 
+	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
@@ -98,7 +99,10 @@ func certificatesPost(d *Daemon, r *http.Request) Response {
 	}
 
 	// Access check
-	secret := daemonConfig["core.trust_password"].Get()
+	secret, err := cluster.ConfigGetString(d.cluster, "core.trust_password")
+	if err != nil {
+		return SmartError(err)
+	}
 	if d.checkTrustedClient(r) != nil && util.PasswordCheck(secret, req.Password) != nil {
 		return Forbidden
 	}
@@ -144,7 +148,7 @@ func certificatesPost(d *Daemon, r *http.Request) Response {
 		}
 	}
 
-	err := saveCert(d.db, name, cert)
+	err = saveCert(d.db, name, cert)
 	if err != nil {
 		return SmartError(err)
 	}
diff --git a/lxd/cluster/config.go b/lxd/cluster/config.go
index c23950f2d..142b4e8ba 100644
--- a/lxd/cluster/config.go
+++ b/lxd/cluster/config.go
@@ -62,17 +62,47 @@ func (c *Config) TrustPassword() string {
 	return c.m.GetString("core.trust_password")
 }
 
+// MacaroonEndpoint returns the address of the macaroon endpoint to use for
+// authentication, if any.
+func (c *Config) MacaroonEndpoint() string {
+	return c.m.GetString("core.macaroon.endpoint")
+}
+
 // AutoUpdateInterval returns the configured images auto update interval.
 func (c *Config) AutoUpdateInterval() time.Duration {
 	n := c.m.GetInt64("images.auto_update_interval")
 	return time.Duration(n) * time.Hour
 }
 
+// RemoteCacheExpiry returns the configured expiration value for cached
+// remote images.
+func (c *Config) RemoteCacheExpiry() int64 {
+	return c.m.GetInt64("images.remote_cache_expiry")
+}
+
+// ProxyHTTPS returns the configured HTTPS proxy, if any.
+func (c *Config) ProxyHTTPS() string {
+	return c.m.GetString("core.proxy_https")
+}
+
 // ProxyHTTP returns the configured HTTP proxy, if any.
 func (c *Config) ProxyHTTP() string {
 	return c.m.GetString("core.proxy_http")
 }
 
+// ProxyIgnoreHosts returns the configured ignore-hosts proxy setting, if any.
+func (c *Config) ProxyIgnoreHosts() string {
+	return c.m.GetString("core.proxy_ignore_hosts")
+}
+
+// MAASController returns the configured MAAS URL, key and machine.
+func (c *Config) MAASController() (string, string, string) {
+	url := c.m.GetString("maas.api.url")
+	key := c.m.GetString("maas.api.key")
+	machine := c.m.GetString("maas.machine")
+	return url, key, machine
+}
+
 // Dump current configuration keys and their values. Keys with values matching
 // their defaults are omitted.
 func (c *Config) Dump() map[string]interface{} {
@@ -111,6 +141,55 @@ func (c *Config) update(values map[string]interface{}) (map[string]string, error
 	return changed, nil
 }
 
+// ConfigGetString is a convenience for loading the cluster configuration and
+// returning the value of a particular key.
+//
+// It's a deprecated API meant to be used by call sites that are not
+// interacting with the database in a transactional way.
+func ConfigGetString(cluster *db.Cluster, key string) (string, error) {
+	config, err := configGet(cluster)
+	if err != nil {
+		return "", err
+	}
+	return config.m.GetString(key), nil
+}
+
+// ConfigGetBool is a convenience for loading the cluster configuration and
+// returning the value of a particular boolean key.
+//
+// It's a deprecated API meant to be used by call sites that are not
+// interacting with the database in a transactional way.
+func ConfigGetBool(cluster *db.Cluster, key string) (bool, error) {
+	config, err := configGet(cluster)
+	if err != nil {
+		return false, err
+	}
+	return config.m.GetBool(key), nil
+}
+
+// ConfigGetInt64 is a convenience for loading the cluster configuration and
+// returning the value of a particular key.
+//
+// It's a deprecated API meant to be used by call sites that are not
+// interacting with the database in a transactional way.
+func ConfigGetInt64(cluster *db.Cluster, key string) (int64, error) {
+	config, err := configGet(cluster)
+	if err != nil {
+		return 0, err
+	}
+	return config.m.GetInt64(key), nil
+}
+
+func configGet(cluster *db.Cluster) (*Config, error) {
+	var config *Config
+	err := cluster.Transaction(func(tx *db.ClusterTx) error {
+		var err error
+		config, err = ConfigLoad(tx)
+		return err
+	})
+	return config, err
+}
+
 // ConfigSchema defines available server configuration keys.
 var ConfigSchema = config.Schema{
 	"core.https_allowed_headers":     {},
diff --git a/lxd/containers_post.go b/lxd/containers_post.go
index 76d2a615a..5ac3e0b34 100644
--- a/lxd/containers_post.go
+++ b/lxd/containers_post.go
@@ -11,6 +11,7 @@ import (
 	"github.com/dustinkirkland/golang-petname"
 	"github.com/gorilla/websocket"
 
+	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/types"
 	"github.com/lxc/lxd/shared"
@@ -96,9 +97,13 @@ func createFromImage(d *Daemon, req *api.ContainersPost) Response {
 
 		var info *api.Image
 		if req.Source.Server != "" {
+			autoUpdate, err := cluster.ConfigGetBool(d.cluster, "images.auto_update_cached")
+			if err != nil {
+				return err
+			}
 			info, err = d.ImageDownload(
-				op, req.Source.Server, req.Source.Protocol, req.Source.Certificate, req.Source.Secret,
-				hash, true, daemonConfig["images.auto_update_cached"].GetBool(), "", true)
+				op, req.Source.Server, req.Source.Protocol, req.Source.Certificate,
+				req.Source.Secret, hash, true, autoUpdate, "", true)
 			if err != nil {
 				return err
 			}
diff --git a/lxd/daemon.go b/lxd/daemon.go
index b0f9684a4..8582953a4 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -433,12 +433,6 @@ func (d *Daemon) init() error {
 		}
 	}
 
-	/* Load all config values from the database */
-	err = daemonConfigInit(d.cluster)
-	if err != nil {
-		return err
-	}
-
 	/* Read the storage pools */
 	err = SetupStorageDriver(d.State(), false)
 	if err != nil {
@@ -466,32 +460,42 @@ func (d *Daemon) init() error {
 	/* Log expiry */
 	d.tasks.Add(expireLogsTask(d.State()))
 
-	/* set the initial proxy function based on config values in the DB */
-	d.proxy = shared.ProxyFromConfig(
-		daemonConfig["core.proxy_https"].Get(),
-		daemonConfig["core.proxy_http"].Get(),
-		daemonConfig["core.proxy_ignore_hosts"].Get(),
-	)
-
-	if !d.os.MockMode {
-		/* Start the scheduler */
-		go deviceEventListener(d.State())
-		readSavedClientCAList(d)
+	/* set the initial proxy function and external auth based on config values in the DB */
+	macaroonEndpoint := ""
+	maasAPIURL := ""
+	maasAPIKey := ""
+	maasMachine := ""
+	err = d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		config, err := cluster.ConfigLoad(tx)
+		if err != nil {
+			return err
+		}
+		d.proxy = shared.ProxyFromConfig(
+			config.ProxyHTTPS(), config.ProxyHTTP(), config.ProxyIgnoreHosts(),
+		)
+		macaroonEndpoint = config.MacaroonEndpoint()
+		maasAPIURL, maasAPIKey, maasMachine = config.MAASController()
+		return nil
+	})
+	if err != nil {
+		return err
 	}
-
-	err = d.setupExternalAuthentication(daemonConfig["core.macaroon.endpoint"].Get())
+	err = d.setupExternalAuthentication(macaroonEndpoint)
 	if err != nil {
 		return err
 	}
 
-	err = d.setupMAASController(
-		daemonConfig["maas.api.url"].Get(),
-		daemonConfig["maas.api.key"].Get(),
-		daemonConfig["maas.machine"].Get())
+	err = d.setupMAASController(maasAPIURL, maasAPIKey, maasMachine)
 	if err != nil {
 		return err
 	}
 
+	if !d.os.MockMode {
+		/* Start the scheduler */
+		go deviceEventListener(d.State())
+		readSavedClientCAList(d)
+	}
+
 	close(d.setupChan)
 
 	// Run the post initialization actions
diff --git a/lxd/daemon_config.go b/lxd/daemon_config.go
index d89b99e9a..a3f9a6348 100644
--- a/lxd/daemon_config.go
+++ b/lxd/daemon_config.go
@@ -1,182 +1,18 @@
 package main
 
 import (
-	"fmt"
-	"strconv"
-	"strings"
-	"sync"
-
-	log "github.com/lxc/lxd/shared/log15"
-
 	"github.com/lxc/lxd/lxd/cluster"
-	dbapi "github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/node"
 	"github.com/lxc/lxd/lxd/state"
 	"github.com/lxc/lxd/shared"
-	"github.com/lxc/lxd/shared/logger"
 )
 
-var daemonConfigLock sync.Mutex
-var daemonConfig map[string]*daemonConfigKey
-
-type daemonConfigKey struct {
-	valueType    string
-	defaultValue string
-	validValues  []string
-	currentValue string
-	hiddenValue  bool
-
-	validator func(d *Daemon, key string, value string) error
-	setter    func(d *Daemon, key string, value string) (string, error)
-	trigger   func(d *Daemon, key string, value string)
-}
-
-func (k *daemonConfigKey) name() string {
-	name := ""
-
-	// Look for a matching entry in daemonConfig
-	daemonConfigLock.Lock()
-	for key, value := range daemonConfig {
-		if value == k {
-			name = key
-			break
-		}
-	}
-	daemonConfigLock.Unlock()
-
-	return name
-}
-
-func (k *daemonConfigKey) Validate(d *Daemon, value string) error {
-	// Handle unsetting
-	if value == "" {
-		value = k.defaultValue
-
-		if k.validator != nil {
-			err := k.validator(d, k.name(), value)
-			if err != nil {
-				return err
-			}
-		}
-
-		return nil
-	}
-
-	// Validate booleans
-	if k.valueType == "bool" && !shared.StringInSlice(strings.ToLower(value), []string{"true", "false", "1", "0", "yes", "no", "on", "off"}) {
-		return fmt.Errorf("Invalid value for a boolean: %s", value)
-	}
-
-	// Validate integers
-	if k.valueType == "int" {
-		_, err := strconv.ParseInt(value, 10, 64)
-		if err != nil {
-			return err
-		}
-	}
-
-	// Check against valid values
-	if k.validValues != nil && !shared.StringInSlice(value, k.validValues) {
-		return fmt.Errorf("Invalid value, only the following values are allowed: %s", k.validValues)
-	}
-
-	// Run external validation function
-	if k.validator != nil {
-		err := k.validator(d, k.name(), value)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (k *daemonConfigKey) Get() string {
-	value := k.currentValue
-
-	// Get the default value if not set
-	if value == "" {
-		value = k.defaultValue
-	}
-
-	return value
-}
-
-func (k *daemonConfigKey) GetBool() bool {
-	value := k.currentValue
-
-	// Get the default value if not set
-	if value == "" {
-		value = k.defaultValue
-	}
-
-	// Convert to boolean
-	return shared.IsTrue(value)
-}
-
-func (k *daemonConfigKey) GetInt64() int64 {
-	value := k.currentValue
-
-	// Get the default value if not set
-	if value == "" {
-		value = k.defaultValue
-	}
-
-	// Convert to int64
-	ret, _ := strconv.ParseInt(value, 10, 64)
-	return ret
-}
-
-func daemonConfigInit(cluster *dbapi.Cluster) error {
-	// Set all the keys
-	daemonConfig = map[string]*daemonConfigKey{
-		"core.proxy_http":         {valueType: "string"},
-		"core.proxy_https":        {valueType: "string"},
-		"core.proxy_ignore_hosts": {valueType: "string"},
-		"core.trust_password":     {valueType: "string", hiddenValue: true},
-		"core.macaroon.endpoint":  {valueType: "string"},
-
-		"images.auto_update_cached":    {valueType: "bool", defaultValue: "true"},
-		"images.auto_update_interval":  {valueType: "int", defaultValue: "6"},
-		"images.compression_algorithm": {valueType: "string", defaultValue: "gzip"},
-		"images.remote_cache_expiry":   {valueType: "int", defaultValue: "10"},
-
-		"maas.api.key": {valueType: "string", setter: daemonConfigSetMAAS},
-		"maas.api.url": {valueType: "string", setter: daemonConfigSetMAAS},
-		"maas.machine": {valueType: "string", setter: daemonConfigSetMAAS},
-	}
-
-	// Load the values from the DB
-	var dbValues map[string]string
-	err := cluster.Transaction(func(tx *dbapi.ClusterTx) error {
-		var err error
-		dbValues, err = tx.Config()
-		return err
-	})
-	if err != nil {
-		return err
-	}
-
-	daemonConfigLock.Lock()
-	for k, v := range dbValues {
-		_, ok := daemonConfig[k]
-		if !ok {
-			logger.Error("Found unknown configuration key in database", log.Ctx{"key": k})
-			continue
-		}
-
-		daemonConfig[k].currentValue = v
-	}
-	daemonConfigLock.Unlock()
-
-	return nil
-}
-
 func daemonConfigRender(state *state.State) (map[string]interface{}, error) {
 	config := map[string]interface{}{}
 
 	// Turn the config into a JSON-compatible map
-	err := state.Cluster.Transaction(func(tx *dbapi.ClusterTx) error {
+	err := state.Cluster.Transaction(func(tx *db.ClusterTx) error {
 		clusterConfig, err := cluster.ConfigLoad(tx)
 		if err != nil {
 			return err
@@ -190,7 +26,7 @@ func daemonConfigRender(state *state.State) (map[string]interface{}, error) {
 		return nil, err
 	}
 
-	err = state.Node.Transaction(func(tx *dbapi.NodeTx) error {
+	err = state.Node.Transaction(func(tx *db.NodeTx) error {
 		nodeConfig, err := node.ConfigLoad(tx)
 		if err != nil {
 			return err
@@ -207,12 +43,12 @@ func daemonConfigRender(state *state.State) (map[string]interface{}, error) {
 	return config, nil
 }
 
-func daemonConfigSetProxy(d *Daemon, config map[string]string) {
+func daemonConfigSetProxy(d *Daemon, config *cluster.Config) {
 	// Update the cached proxy function
 	d.proxy = shared.ProxyFromConfig(
-		config["core.proxy_https"],
-		config["core.proxy_http"],
-		config["core.proxy_ignore_hosts"],
+		config.ProxyHTTPS(),
+		config.ProxyHTTP(),
+		config.ProxyIgnoreHosts(),
 	)
 
 	// Clear the simplestreams cache as it's tied to the old proxy config
@@ -222,27 +58,3 @@ func daemonConfigSetProxy(d *Daemon, config map[string]string) {
 	}
 	imageStreamCacheLock.Unlock()
 }
-
-func daemonConfigSetMAAS(d *Daemon, key string, value string) (string, error) {
-	maasUrl := daemonConfig["maas.api.url"].Get()
-	if key == "maas.api.url" {
-		maasUrl = value
-	}
-
-	maasKey := daemonConfig["maas.api.key"].Get()
-	if key == "maas.api.key" {
-		maasKey = value
-	}
-
-	maasMachine := daemonConfig["maas.machine"].Get()
-	if key == "maas.machine" {
-		maasMachine = value
-	}
-
-	err := d.setupMAASController(maasUrl, maasKey, maasMachine)
-	if err != nil {
-		return "", err
-	}
-
-	return value, nil
-}
diff --git a/lxd/daemon_images.go b/lxd/daemon_images.go
index d6a5a65ca..e4b6aaf44 100644
--- a/lxd/daemon_images.go
+++ b/lxd/daemon_images.go
@@ -15,6 +15,7 @@ import (
 	"gopkg.in/yaml.v2"
 
 	"github.com/lxc/lxd/client"
+	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/sys"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
@@ -231,7 +232,10 @@ func (d *Daemon) ImageDownload(op *operation, server string, protocol string, ce
 	// server/protocol/alias, regardless of whether it's stale or
 	// not (we can assume that it will be not *too* stale since
 	// auto-update is on).
-	interval := daemonConfig["images.auto_update_interval"].GetInt64()
+	interval, err := cluster.ConfigGetInt64(d.cluster, "images.auto_update_interval")
+	if err != nil {
+		return nil, err
+	}
 	if preferCached && interval > 0 && alias != fp {
 		cachedFingerprint, err := d.db.ImageSourceGetCachedFingerprint(server, protocol, alias)
 		if err == nil && cachedFingerprint != fp {
diff --git a/lxd/images.go b/lxd/images.go
index 3f5a9d628..f3d9259b0 100644
--- a/lxd/images.go
+++ b/lxd/images.go
@@ -20,9 +20,11 @@ import (
 	"time"
 
 	"github.com/gorilla/mux"
+	"github.com/pkg/errors"
 	"golang.org/x/net/context"
 	"gopkg.in/yaml.v2"
 
+	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/state"
 	"github.com/lxc/lxd/lxd/task"
@@ -164,7 +166,10 @@ func imgPostContInfo(d *Daemon, r *http.Request, req api.ImagesPost, builddir st
 	if req.CompressionAlgorithm != "" {
 		compress = req.CompressionAlgorithm
 	} else {
-		compress = daemonConfig["images.compression_algorithm"].Get()
+		compress, err = cluster.ConfigGetString(d.cluster, "images.compression_algorithm")
+		if err != nil {
+			return nil, err
+		}
 	}
 
 	if compress != "none" {
@@ -755,8 +760,19 @@ func autoUpdateImagesTask(d *Daemon) (task.Func, task.Schedule) {
 		autoUpdateImages(ctx, d)
 	}
 	schedule := func() (time.Duration, error) {
-		interval := daemonConfig["images.auto_update_interval"].GetInt64()
-		return time.Duration(interval) * time.Hour, nil
+		var interval time.Duration
+		err := d.cluster.Transaction(func(tx *db.ClusterTx) error {
+			config, err := cluster.ConfigLoad(tx)
+			if err != nil {
+				return errors.Wrap(err, "failed to load cluster configuration")
+			}
+			interval = config.AutoUpdateInterval()
+			return nil
+		})
+		if err != nil {
+			return 0, err
+		}
+		return interval, nil
 	}
 	return f, schedule
 }
@@ -933,8 +949,10 @@ func pruneExpiredImagesTask(d *Daemon) (task.Func, task.Schedule) {
 
 	// Skip the first run, and instead run an initial pruning synchronously
 	// before we start updating images later on in the start up process.
-	expiry := daemonConfig["images.remote_cache_expiry"].GetInt64()
-	if expiry > 0 {
+	expiry, err := cluster.ConfigGetInt64(d.cluster, "images.remote_cache_expiry")
+	if err != nil {
+		logger.Error("Unable to fetch cluster configuration", log.Ctx{"err": err})
+	} else if expiry > 0 {
 		pruneExpiredImages(context.Background(), d)
 	}
 	first := true
@@ -945,7 +963,11 @@ func pruneExpiredImagesTask(d *Daemon) (task.Func, task.Schedule) {
 			return interval, task.ErrSkip
 		}
 
-		expiry := daemonConfig["images.remote_cache_expiry"].GetInt64()
+		expiry, err := cluster.ConfigGetInt64(d.cluster, "images.remote_cache_expiry")
+		if err != nil {
+			logger.Error("Unable to fetch cluster configuration", log.Ctx{"err": err})
+			return interval, nil
+		}
 
 		// Check if we're supposed to prune at all
 		if expiry <= 0 {
@@ -959,10 +981,15 @@ func pruneExpiredImagesTask(d *Daemon) (task.Func, task.Schedule) {
 }
 
 func pruneExpiredImages(ctx context.Context, d *Daemon) {
-	// Get the list of expired images.
-	expiry := daemonConfig["images.remote_cache_expiry"].GetInt64()
-
 	logger.Infof("Pruning expired images")
+
+	expiry, err := cluster.ConfigGetInt64(d.cluster, "images.remote_cache_expiry")
+	if err != nil {
+		logger.Error("Unable to fetch cluster configuration", log.Ctx{"err": err})
+		return
+	}
+
+	// Get the list of expired images.
 	images, err := d.db.ImagesGetExpired(expiry)
 	if err != nil {
 		logger.Error("Unable to retrieve the list of expired images", log.Ctx{"err": err})
diff --git a/lxd/main_init_test.go b/lxd/main_init_test.go
index 94dce4347..66e37810d 100644
--- a/lxd/main_init_test.go
+++ b/lxd/main_init_test.go
@@ -396,8 +396,9 @@ storage_pools:
 	_, _, err = suite.client.GetStoragePool("second")
 	suite.Req.Equal("not found", err.Error())
 
-	key, _ := daemonConfig["images.auto_update_interval"]
-	suite.Req.NotEqual("15", key.Get())
+	interval, err := cluster.ConfigGetInt64(suite.d.cluster, "images.auto_update_interval")
+	suite.Req.NoError(err)
+	suite.Req.NotEqual(int64(15), interval)
 }
 
 // Updating a storage pool via preseed will fail, since it's not supported

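The hunks above complete the migration away from the global daemonConfig map:
every read now goes through the cluster database. A minimal sketch of the two
resulting read patterns, assuming a *Daemon with a d.cluster handle as in the
patch (a fragment, not a complete file):

    // One-off read of a single key through the deprecated convenience
    // helper; each call opens its own transaction.
    compress, err := cluster.ConfigGetString(d.cluster, "images.compression_algorithm")
    if err != nil {
        return err
    }

    // Preferred: read several keys in a single transaction via ConfigLoad.
    err = d.cluster.Transaction(func(tx *db.ClusterTx) error {
        config, err := cluster.ConfigLoad(tx)
        if err != nil {
            return err
        }
        d.proxy = shared.ProxyFromConfig(
            config.ProxyHTTPS(), config.ProxyHTTP(), config.ProxyIgnoreHosts())
        return nil
    })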
From a7bf1213a1ae096ff3e4afb26bf6d784d954825a Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 20 Oct 2017 13:13:28 +0000
Subject: [PATCH 044/227] Add more clustering-related integration tests

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/.dir-locals.el             |   2 +-
 lxd/api_cluster_test.go        | 186 +++++++++++++++++++++++++++++++++++++++--
 lxd/daemon.go                  |  19 +++--
 lxd/daemon_integration_test.go |  20 +++++
 shared/logging/testing.go      |   1 +
 5 files changed, 215 insertions(+), 13 deletions(-)

diff --git a/lxd/.dir-locals.el b/lxd/.dir-locals.el
index 315bd893b..bf09f9074 100644
--- a/lxd/.dir-locals.el
+++ b/lxd/.dir-locals.el
@@ -1,7 +1,7 @@
 ;;; Directory Local Variables
 ;;; For more information see (info "(emacs) Directory Variables")
 ((go-mode
-  . ((go-test-args . "-tags libsqlite3 -timeout 10s")
+  . ((go-test-args . "-tags libsqlite3 -timeout 25s")
      (eval
       . (set
 	 (make-local-variable 'flycheck-go-build-tags)
diff --git a/lxd/api_cluster_test.go b/lxd/api_cluster_test.go
index aa096b9d6..858f2433a 100644
--- a/lxd/api_cluster_test.go
+++ b/lxd/api_cluster_test.go
@@ -5,31 +5,203 @@ import (
 	"testing"
 
 	lxd "github.com/lxc/lxd/client"
+	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/shared"
+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
 
-// A LXD node which is already configured for networking can be coverted to a
+// A LXD node which is already configured for networking can be converted to a
 // single-node LXD cluster.
 func TestCluster_Bootstrap(t *testing.T) {
 	daemon, cleanup := newDaemon(t)
 	defer cleanup()
 
-	client, err := lxd.ConnectLXDUnix(daemon.UnixSocket(), nil)
+	f := clusterFixture{t: t}
+	f.EnableNetworking(daemon, "")
+
+	client := f.ClientUnix(daemon)
+
+	op, err := client.BootstrapCluster("buzz")
 	require.NoError(t, err)
+	require.NoError(t, op.Wait())
+}
 
-	server, _, err := client.GetServer()
+// A LXD node which is already configured for networking can join an existing
+// cluster.
+func TestCluster_Join(t *testing.T) {
+	daemons, cleanup := newDaemons(t, 2)
+	defer cleanup()
+
+	f := clusterFixture{t: t}
+	passwords := []string{"sekret", ""}
+
+	for i, daemon := range daemons {
+		f.EnableNetworking(daemon, passwords[i])
+	}
+
+	// Bootstrap the cluster using the first node.
+	client := f.ClientUnix(daemons[0])
+	op, err := client.BootstrapCluster("buzz")
+	require.NoError(t, err)
+	require.NoError(t, op.Wait())
+
+	// Make the second node join the cluster.
+	address := daemons[0].endpoints.NetworkAddress()
+	cert := daemons[0].endpoints.NetworkPublicKey()
+	client = f.ClientUnix(daemons[1])
+	op, err = client.JoinCluster(address, "sekret", cert, "rusp")
 	require.NoError(t, err)
+	require.NoError(t, op.Wait())
 
-	port, err := shared.AllocatePort()
+	// Both nodes are listed as database nodes in the second node's sqlite
+	// database.
+	state := daemons[1].State()
+	err = state.Node.Transaction(func(tx *db.NodeTx) error {
+		nodes, err := tx.RaftNodes()
+		require.NoError(t, err)
+		require.Len(t, nodes, 2)
+		assert.Equal(t, int64(1), nodes[0].ID)
+		assert.Equal(t, int64(2), nodes[1].ID)
+		assert.Equal(t, daemons[0].endpoints.NetworkAddress(), nodes[0].Address)
+		assert.Equal(t, daemons[1].endpoints.NetworkAddress(), nodes[1].Address)
+		return nil
+	})
 	require.NoError(t, err)
+}
 
-	serverPut := server.Writable()
-	serverPut.Config["core.https_address"] = fmt.Sprintf("localhost:%d", port)
+// If the wrong trust password is given, the join request fails.
+func TestCluster_JoinWrongTrustPassword(t *testing.T) {
+	daemons, cleanup := newDaemons(t, 2)
+	defer cleanup()
+
+	f := clusterFixture{t: t}
+	passwords := []string{"sekret", ""}
 
-	require.NoError(t, client.UpdateServer(serverPut, ""))
+	for i, daemon := range daemons {
+		f.EnableNetworking(daemon, passwords[i])
+	}
 
+	// Bootstrap the cluster using the first node.
+	client := f.ClientUnix(daemons[0])
 	op, err := client.BootstrapCluster("buzz")
 	require.NoError(t, err)
 	require.NoError(t, op.Wait())
+
+	// Make the second node join the cluster.
+	address := daemons[0].endpoints.NetworkAddress()
+	cert := daemons[0].endpoints.NetworkPublicKey()
+	client = f.ClientUnix(daemons[1])
+	op, err = client.JoinCluster(address, "noop", cert, "rusp")
+	require.NoError(t, err)
+	assert.EqualError(t, op.Wait(), "failed to request to add node: not authorized")
+}
+
+// In a cluster of 3 nodes, if the leader goes down, another one is elected
+// and the other two nodes continue to operate fine.
+func TestCluster_Failover(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping cluster failover test in short mode.")
+	}
+	daemons, cleanup := newDaemons(t, 3)
+	defer cleanup()
+
+	f := clusterFixture{t: t}
+	f.FormCluster(daemons)
+
+	// FIXME: here we manually update the raft_nodes table, this can be
+	//        removed when automatic database nodes updating is in place.
+	var nodes []db.RaftNode
+	state := daemons[0].State()
+	err := state.Node.Transaction(func(tx *db.NodeTx) error {
+		var err error
+		nodes, err = tx.RaftNodes()
+		return err
+	})
+	require.NoError(t, err)
+	for _, daemon := range daemons[1:] {
+		state := daemon.State()
+		err := state.Node.Transaction(func(tx *db.NodeTx) error {
+			return tx.RaftNodesReplace(nodes)
+		})
+		require.NoError(t, err)
+	}
+
+	require.NoError(t, daemons[0].Stop())
+
+	for i, daemon := range daemons[1:] {
+		client := f.ClientUnix(daemon)
+		server, _, err := client.GetServer()
+		require.NoError(f.t, err)
+		serverPut := server.Writable()
+		serverPut.Config["core.trust_password"] = fmt.Sprintf("sekret-%d", i)
+
+		require.NoError(f.t, client.UpdateServer(serverPut, ""))
+	}
+}
+
+// Test helper for cluster-related APIs.
+type clusterFixture struct {
+	t       *testing.T
+	clients map[*Daemon]lxd.ContainerServer
+}
+
+// Form a cluster using the given daemons. The first daemon will be the leader.
+func (f *clusterFixture) FormCluster(daemons []*Daemon) {
+	for i, daemon := range daemons {
+		password := ""
+		if i == 0 {
+			password = "sekret"
+		}
+		f.EnableNetworking(daemon, password)
+	}
+
+	// Bootstrap the cluster using the first node.
+	client := f.ClientUnix(daemons[0])
+	op, err := client.BootstrapCluster("buzz")
+	require.NoError(f.t, err)
+	require.NoError(f.t, op.Wait())
+
+	// Make the other nodes join the cluster.
+	address := daemons[0].endpoints.NetworkAddress()
+	cert := daemons[0].endpoints.NetworkPublicKey()
+	for i, daemon := range daemons[1:] {
+		client = f.ClientUnix(daemon)
+		op, err := client.JoinCluster(address, "sekret", cert, fmt.Sprintf("rusp-%d", i))
+		require.NoError(f.t, err)
+		require.NoError(f.t, op.Wait())
+	}
+}
+
+// Enable networking in the given daemon. The password is optional and can be
+// an empty string.
+func (f *clusterFixture) EnableNetworking(daemon *Daemon, password string) {
+	port, err := shared.AllocatePort()
+	require.NoError(f.t, err)
+
+	address := fmt.Sprintf("127.0.0.1:%d", port)
+
+	client := f.ClientUnix(daemon)
+	server, _, err := client.GetServer()
+	require.NoError(f.t, err)
+	serverPut := server.Writable()
+	serverPut.Config["core.https_address"] = address
+	serverPut.Config["core.trust_password"] = password
+
+	require.NoError(f.t, client.UpdateServer(serverPut, ""))
+}
+
+// Get a client for the given daemon connected via UNIX socket, creating one if
+// needed.
+func (f *clusterFixture) ClientUnix(daemon *Daemon) lxd.ContainerServer {
+	if f.clients == nil {
+		f.clients = make(map[*Daemon]lxd.ContainerServer)
+	}
+	client, ok := f.clients[daemon]
+	if !ok {
+		var err error
+		client, err = lxd.ConnectLXDUnix(daemon.UnixSocket(), nil)
+		require.NoError(f.t, err)
+	}
+	return client
 }
diff --git a/lxd/daemon.go b/lxd/daemon.go
index 8582953a4..15e619d42 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"crypto/x509"
 	"database/sql"
+	"database/sql/driver"
 	"fmt"
 	"io"
 	"net/http"
@@ -563,10 +564,10 @@ func (d *Daemon) numRunningContainers() (int, error) {
 
 // Stop stops the shared daemon.
 func (d *Daemon) Stop() error {
-	errors := []error{}
+	errs := []error{}
 	trackError := func(err error) {
 		if err != nil {
-			errors = append(errors, err)
+			errs = append(errs, err)
 		}
 	}
 
@@ -586,7 +587,15 @@ func (d *Daemon) Stop() error {
 		trackError(d.db.Close())
 	}
 	if d.cluster != nil {
-		trackError(d.cluster.Close())
+		err := d.cluster.Close()
+		// If closing returned a bad-connection error, the network
+		// connection was interrupted, likely because another node
+		// shut down. Let's just log a warning.
+		if errors.Cause(err) == driver.ErrBadConn {
+			logger.Warnf("Could not close remote database: %v", err)
+		} else {
+			trackError(err)
+		}
 	}
 	if d.gateway != nil {
 		trackError(d.gateway.Shutdown())
@@ -616,12 +625,12 @@ func (d *Daemon) Stop() error {
 	logger.Infof("Saved simplestreams cache")
 
 	var err error
-	if n := len(errors); n > 0 {
+	if n := len(errs); n > 0 {
 		format := "%v"
 		if n > 1 {
 			format += fmt.Sprintf(" (and %d more errors)", n)
 		}
-		err = fmt.Errorf(format, errors[0])
+		err = fmt.Errorf(format, errs[0])
 	}
 	return err
 }
diff --git a/lxd/daemon_integration_test.go b/lxd/daemon_integration_test.go
index f18c0e78c..2012dc657 100644
--- a/lxd/daemon_integration_test.go
+++ b/lxd/daemon_integration_test.go
@@ -45,6 +45,26 @@ func newDaemon(t *testing.T) (*Daemon, func()) {
 	return daemon, cleanup
 }
 
+// Create the given number of test Daemon instances.
+//
+// Return a function that can be used to clean up all associated state.
+func newDaemons(t *testing.T, n int) ([]*Daemon, func()) {
+	daemons := make([]*Daemon, n)
+	cleanups := make([]func(), n)
+
+	for i := 0; i < n; i++ {
+		daemons[i], cleanups[i] = newDaemon(t)
+	}
+
+	cleanup := func() {
+		for _, cleanup := range cleanups {
+			cleanup()
+		}
+	}
+
+	return daemons, cleanup
+}
+
 // Create a new DaemonConfig object for testing purposes.
 func newConfig() *DaemonConfig {
 	return &DaemonConfig{
diff --git a/shared/logging/testing.go b/shared/logging/testing.go
index d92241d8f..22c3a9a90 100644
--- a/shared/logging/testing.go
+++ b/shared/logging/testing.go
@@ -34,5 +34,6 @@ func (h *testingHandler) Log(r *log.Record) error {
 	}
 
 	h.t.Logf("%s %s %s%s", r.Time.Format("15:04:05.000"), r.Lvl, r.Msg, ctx)
+	//fmt.Printf("%s %s %s%s\n", r.Time.Format("15:04:05.000"), r.Lvl, r.Msg, ctx)
 	return nil
 }

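For reference, the new helpers compose in a test body roughly as follows (a
fragment mirroring TestCluster_Failover above; all names come from the patch):

    daemons, cleanup := newDaemons(t, 3)
    defer cleanup()

    f := clusterFixture{t: t}
    f.FormCluster(daemons) // bootstraps daemons[0], joins the others

    // ClientUnix caches one UNIX-socket client per daemon.
    client := f.ClientUnix(daemons[1])
    server, _, err := client.GetServer()
    require.NoError(t, err)
    _ = server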
From e90bca5d2e4535daa4ee09dc2c6cd1d385ffe729 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 20 Oct 2017 14:08:52 +0000
Subject: [PATCH 045/227] Check cluster TLS certificate in gRPC endpoint

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/gateway.go         |  5 +++++
 lxd/cluster/gateway_test.go    | 39 ++++++++++++++++++++++++++++++++++++++-
 lxd/cluster/raft.go            | 13 ++-----------
 lxd/cluster/tls.go             | 16 ++++++++++++++++
 lxd/cluster/tls_export_test.go |  4 ++++
 5 files changed, 65 insertions(+), 12 deletions(-)
 create mode 100644 lxd/cluster/tls_export_test.go

diff --git a/lxd/cluster/gateway.go b/lxd/cluster/gateway.go
index 1c8d65924..af0a9de80 100644
--- a/lxd/cluster/gateway.go
+++ b/lxd/cluster/gateway.go
@@ -86,6 +86,11 @@ func (g *Gateway) HandlerFuncs() map[string]http.HandlerFunc {
 			return
 		}
 
+		if !tlsCheckCert(r, g.cert) {
+			http.Error(w, "403 invalid client certificate", http.StatusForbidden)
+			return
+		}
+
 		// Before actually establishing the gRPC SQL connection, our
 		// dialer probes the node to see if it's currently the leader
 		// (otherwise it tries with another node or retry later).
diff --git a/lxd/cluster/gateway_test.go b/lxd/cluster/gateway_test.go
index cb5c500e2..d2e5afe69 100644
--- a/lxd/cluster/gateway_test.go
+++ b/lxd/cluster/gateway_test.go
@@ -1,13 +1,14 @@
 package cluster_test
 
 import (
+	"fmt"
 	"net/http"
 	"net/http/httptest"
 	"os"
 	"path/filepath"
 	"testing"
 
-	grpcsql "github.com/CanonicalLtd/go-grpc-sql"
+	"github.com/CanonicalLtd/go-grpc-sql"
 	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/shared"
@@ -68,6 +69,42 @@ func TestGateway_SingleWithNetworkAddress(t *testing.T) {
 	require.NoError(t, conn.Close())
 }
 
+// When networked, the gRPC and raft endpoints require the cluster
+// certificate.
+func TestGateway_NetworkAuth(t *testing.T) {
+	db, cleanup := db.NewTestNode(t)
+	defer cleanup()
+
+	cert := shared.TestingKeyPair()
+	mux := http.NewServeMux()
+	server := newServer(cert, mux)
+	defer server.Close()
+
+	address := server.Listener.Addr().String()
+	setRaftRole(t, db, address)
+
+	gateway := newGateway(t, db, cert)
+	defer gateway.Shutdown()
+
+	for path, handler := range gateway.HandlerFuncs() {
+		mux.HandleFunc(path, handler)
+	}
+
+	// Make a request using a certificate different than the cluster one.
+	config, err := cluster.TLSClientConfig(shared.TestingAltKeyPair())
+	config.InsecureSkipVerify = true // Skip client-side verification
+	require.NoError(t, err)
+	client := &http.Client{Transport: &http.Transport{TLSClientConfig: config}}
+
+	for path := range gateway.HandlerFuncs() {
+		url := fmt.Sprintf("https://%s%s", address, path)
+		response, err := client.Head(url)
+		require.NoError(t, err)
+		assert.Equal(t, http.StatusForbidden, response.StatusCode)
+	}
+
+}
+
 // Create a new test Gateway with the given parameters, and ensure no error
 // happens.
 func newGateway(t *testing.T, db *db.Node, certInfo *shared.CertInfo) *cluster.Gateway {
diff --git a/lxd/cluster/raft.go b/lxd/cluster/raft.go
index 7db15baf9..20c3ac9f9 100644
--- a/lxd/cluster/raft.go
+++ b/lxd/cluster/raft.go
@@ -2,7 +2,6 @@ package cluster
 
 import (
 	"bytes"
-	"crypto/x509"
 	"fmt"
 	"log"
 	"math"
@@ -22,7 +21,6 @@ import (
 	"github.com/hashicorp/raft-boltdb"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/node"
-	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/log15"
 	"github.com/lxc/lxd/shared/logger"
@@ -360,17 +358,10 @@ func raftHandler(info *shared.CertInfo, handler *rafthttp.Handler) http.HandlerF
 	if handler == nil {
 		return nil
 	}
-	cert, err := x509.ParseCertificate(info.KeyPair().Certificate[0])
-	if err != nil {
-		// Since we have already loaded this certificate, typically
-		// using LoadX509KeyPair, an error should never happen, but
-		// check for good measure.
-		panic(fmt.Sprintf("invalid keypair material: %v", err))
-	}
-	trustedCerts := []x509.Certificate{*cert}
 	return func(w http.ResponseWriter, r *http.Request) {
-		if r.TLS == nil || !util.CheckTrustState(*r.TLS.PeerCertificates[0], trustedCerts) {
+		if !tlsCheckCert(r, info) {
 			http.Error(w, "403 invalid client certificate", http.StatusForbidden)
+			return
 		}
 		handler.ServeHTTP(w, r)
 	}
diff --git a/lxd/cluster/tls.go b/lxd/cluster/tls.go
index aa9b75731..7ed754ec4 100644
--- a/lxd/cluster/tls.go
+++ b/lxd/cluster/tls.go
@@ -3,7 +3,10 @@ package cluster
 import (
 	"crypto/tls"
 	"crypto/x509"
+	"fmt"
+	"net/http"
 
+	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
 )
 
@@ -33,3 +36,16 @@ func tlsClientConfig(info *shared.CertInfo) (*tls.Config, error) {
 	}
 	return config, nil
 }
+
+// Return true if the given request is presenting the given cluster certificate.
+func tlsCheckCert(r *http.Request, info *shared.CertInfo) bool {
+	cert, err := x509.ParseCertificate(info.KeyPair().Certificate[0])
+	if err != nil {
+		// Since we have already loaded this certificate, typically
+		// using LoadX509KeyPair, an error should never happen, but
+		// check for good measure.
+		panic(fmt.Sprintf("invalid keypair material: %v", err))
+	}
+	trustedCerts := []x509.Certificate{*cert}
+	return r.TLS != nil && util.CheckTrustState(*r.TLS.PeerCertificates[0], trustedCerts)
+}
diff --git a/lxd/cluster/tls_export_test.go b/lxd/cluster/tls_export_test.go
new file mode 100644
index 000000000..d8248b70a
--- /dev/null
+++ b/lxd/cluster/tls_export_test.go
@@ -0,0 +1,4 @@
+package cluster
+
+// TLSClientConfig is used to generate TLS client configurations in unit tests.
+var TLSClientConfig = tlsClientConfig

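With tlsCheckCert factored out, the gRPC and raft handlers now share the same
certificate guard. A hedged sketch of the resulting handler shape (the wrap
helper is illustrative, not part of the patch):

    func wrap(info *shared.CertInfo, next http.Handler) http.HandlerFunc {
        return func(w http.ResponseWriter, r *http.Request) {
            // Reject requests not presenting the cluster certificate.
            if !tlsCheckCert(r, info) {
                http.Error(w, "403 invalid client certificate", http.StatusForbidden)
                return
            }
            next.ServeHTTP(w, r)
        }
    }

Note the trade-off: the certificate is now parsed on every request inside
tlsCheckCert, instead of once at construction time as raftHandler previously
did.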
From e7469a08bfa96357ad365f79fc391f719d323fd0 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 20 Oct 2017 20:48:07 +0000
Subject: [PATCH 046/227] Add heartbeat logic

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/gateway.go             |  57 ++++++++++-
 lxd/cluster/gateway_export_test.go |  22 +++++
 lxd/cluster/gateway_test.go        |  22 +++++
 lxd/cluster/heartbeat.go           | 115 ++++++++++++++++++++++
 lxd/cluster/heartbeat_test.go      | 197 +++++++++++++++++++++++++++++++++++++
 lxd/cluster/membership_test.go     |   9 ++
 lxd/cluster/raft.go                |  16 +++
 7 files changed, 435 insertions(+), 3 deletions(-)
 create mode 100644 lxd/cluster/gateway_export_test.go
 create mode 100644 lxd/cluster/heartbeat.go
 create mode 100644 lxd/cluster/heartbeat_test.go

diff --git a/lxd/cluster/gateway.go b/lxd/cluster/gateway.go
index af0a9de80..826b11aec 100644
--- a/lxd/cluster/gateway.go
+++ b/lxd/cluster/gateway.go
@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"net"
 	"net/http"
+	"strconv"
 	"time"
 
 	"github.com/CanonicalLtd/dqlite"
@@ -102,6 +103,24 @@ func (g *Gateway) HandlerFuncs() map[string]http.HandlerFunc {
 			return
 		}
 
+		// Handle heartbeats.
+		if r.Method == "PUT" {
+			var nodes []db.RaftNode
+			err := shared.ReadToJSON(r.Body, &nodes)
+			if err != nil {
+				http.Error(w, "400 invalid raft nodes payload", http.StatusBadRequest)
+				return
+			}
+			err = g.db.Transaction(func(tx *db.NodeTx) error {
+				return tx.RaftNodesReplace(nodes)
+			})
+			if err != nil {
+				http.Error(w, "500 failed to update raft nodes", http.StatusInternalServerError)
+				return
+			}
+			return
+		}
+
 		g.server.ServeHTTP(w, r)
 	}
 	raft := func(w http.ResponseWriter, r *http.Request) {
@@ -128,7 +147,7 @@ func (g *Gateway) Dialer() grpcsql.Dialer {
 		}
 
 		// Network connection.
-		addresses, err := g.raftNodes()
+		addresses, err := g.cachedRaftNodes()
 		if err != nil {
 			return nil, err
 		}
@@ -208,8 +227,40 @@ func (g *Gateway) waitLeadership() error {
 	return fmt.Errorf("raft node did not self-elect within 5 seconds")
 }
 
-// Return the addresses of the current raft nodes.
-func (g *Gateway) raftNodes() ([]string, error) {
+// Return information about the LXD nodes that are currently part of the raft
+// cluster, as configured in the raft log. It returns an error if this node is
+// not the leader.
+func (g *Gateway) currentRaftNodes() ([]db.RaftNode, error) {
+	if g.raft == nil {
+		return nil, raft.ErrNotLeader
+	}
+	servers, err := g.raft.Servers()
+	if err != nil {
+		return nil, err
+	}
+	provider := raftAddressProvider{db: g.db}
+	nodes := make([]db.RaftNode, len(servers))
+	for i, server := range servers {
+		address, err := provider.ServerAddr(server.ID)
+		if err != nil {
+			return nil, errors.Wrap(err, "failed to fetch raft server address")
+		}
+		id, err := strconv.Atoi(string(server.ID))
+		if err != nil {
+			return nil, errors.Wrap(err, "non-numeric server ID")
+		}
+		nodes[i].ID = int64(id)
+		nodes[i].Address = string(address)
+	}
+	return nodes, nil
+}
+
+// Return the addresses of the raft nodes as stored in the node-level
+// database.
+//
+// These values might lag behind the actual values, and are refreshed
+// periodically during heartbeats.
+func (g *Gateway) cachedRaftNodes() ([]string, error) {
 	var addresses []string
 	err := g.db.Transaction(func(tx *db.NodeTx) error {
 		var err error
diff --git a/lxd/cluster/gateway_export_test.go b/lxd/cluster/gateway_export_test.go
new file mode 100644
index 000000000..6592158db
--- /dev/null
+++ b/lxd/cluster/gateway_export_test.go
@@ -0,0 +1,22 @@
+package cluster
+
+import (
+	"github.com/hashicorp/raft"
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/shared"
+)
+
+// Raft returns the gateway's internal raft instance.
+func (g *Gateway) Raft() *raft.Raft {
+	return g.raft.raft
+}
+
+// Cert returns the gateway's internal TLS certificate information.
+func (g *Gateway) Cert() *shared.CertInfo {
+	return g.cert
+}
+
+// RaftNodes returns the nodes currently part of the raft cluster.
+func (g *Gateway) RaftNodes() ([]db.RaftNode, error) {
+	return g.currentRaftNodes()
+}
diff --git a/lxd/cluster/gateway_test.go b/lxd/cluster/gateway_test.go
index d2e5afe69..10536978b 100644
--- a/lxd/cluster/gateway_test.go
+++ b/lxd/cluster/gateway_test.go
@@ -9,6 +9,7 @@ import (
 	"testing"
 
 	"github.com/CanonicalLtd/go-grpc-sql"
+	"github.com/hashicorp/raft"
 	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/shared"
@@ -105,6 +106,27 @@ func TestGateway_NetworkAuth(t *testing.T) {
 
 }
 
+// RaftNodes returns an error if the underlying raft instance is not the leader.
+func TestGateway_RaftNodesNotLeader(t *testing.T) {
+	db, cleanup := db.NewTestNode(t)
+	defer cleanup()
+
+	cert := shared.TestingKeyPair()
+	mux := http.NewServeMux()
+	server := newServer(cert, mux)
+	defer server.Close()
+
+	address := server.Listener.Addr().String()
+	setRaftRole(t, db, address)
+
+	gateway := newGateway(t, db, cert)
+	defer gateway.Shutdown()
+
+	// Get the nodes immediately, before the election has taken place.
+	_, err := gateway.RaftNodes()
+	assert.Equal(t, raft.ErrNotLeader, err)
+}
+
 // Create a new test Gateway with the given parameters, and ensure no error
 // happens.
 func newGateway(t *testing.T, db *db.Node, certInfo *shared.CertInfo) *cluster.Gateway {
diff --git a/lxd/cluster/heartbeat.go b/lxd/cluster/heartbeat.go
new file mode 100644
index 000000000..6cf302773
--- /dev/null
+++ b/lxd/cluster/heartbeat.go
@@ -0,0 +1,115 @@
+package cluster
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"sync"
+	"time"
+
+	"github.com/hashicorp/raft"
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/task"
+	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/logger"
+	"github.com/pkg/errors"
+	"golang.org/x/net/context"
+)
+
+// Heartbeat returns a task function that performs leader-initiated heartbeat
+// checks against all LXD nodes in the cluster.
+//
+// It will update the heartbeat timestamp column of the nodes table
+// accordingly, and also notify them of the current list of database nodes.
+func Heartbeat(gateway *Gateway, cluster *db.Cluster) task.Func {
+	heartbeat := func(ctx context.Context) {
+		if gateway.server == nil || gateway.memoryDial != nil {
+			// We're not a raft node or we're not clustered
+			return
+		}
+
+		raftNodes, err := gateway.currentRaftNodes()
+		if err == raft.ErrNotLeader {
+			return
+		}
+		if err != nil {
+			logger.Warnf("Failed to get current raft nodes: %v", err)
+			return
+		}
+		var nodes []db.NodeInfo
+		err = cluster.Transaction(func(tx *db.ClusterTx) error {
+			var err error
+			nodes, err = tx.Nodes()
+			return err
+		})
+		wg := sync.WaitGroup{}
+		wg.Add(len(nodes))
+		heartbeats := make([]time.Time, len(nodes))
+		for i, node := range nodes {
+			go func(i int, address string) {
+				defer wg.Done()
+				err := heartbeatNode(ctx, address, gateway.cert, raftNodes)
+				if err == nil {
+					heartbeats[i] = time.Now()
+				}
+			}(i, node.Address)
+		}
+		wg.Wait()
+
+		// If the context has been cancelled, return immediately.
+		if ctx.Err() != nil {
+			return
+		}
+
+		err = cluster.Transaction(func(tx *db.ClusterTx) error {
+			for i, node := range nodes {
+				if heartbeats[i].Equal(time.Time{}) {
+					continue
+				}
+				err := tx.NodeHeartbeat(node.Address, heartbeats[i])
+				if err != nil {
+					return err
+				}
+			}
+			return nil
+		})
+		if err != nil {
+			logger.Warnf("Failed to update heartbeat: %v", err)
+		}
+	}
+	return heartbeat
+}
+
+// Perform a single heartbeat request against the node with the given address.
+func heartbeatNode(ctx context.Context, address string, cert *shared.CertInfo, raftNodes []db.RaftNode) error {
+	config, err := tlsClientConfig(cert)
+	if err != nil {
+		return err
+	}
+	url := fmt.Sprintf("https://%s%s", address, grpcEndpoint)
+	client := &http.Client{Transport: &http.Transport{TLSClientConfig: config}}
+
+	buffer := bytes.Buffer{}
+	err = json.NewEncoder(&buffer).Encode(raftNodes)
+	if err != nil {
+		return err
+	}
+
+	request, err := http.NewRequest("PUT", url, bytes.NewReader(buffer.Bytes()))
+	if err != nil {
+		return err
+	}
+	request = request.WithContext(ctx)
+	request.Close = true // Immediately close the connection after the request is done
+
+	response, err := client.Do(request)
+	if err != nil {
+		return errors.Wrap(err, "failed to send HTTP request")
+	}
+	defer response.Body.Close()
+	if response.StatusCode != http.StatusOK {
+		return fmt.Errorf("HTTP request failed: %s", response.Status)
+	}
+	return nil
+}
diff --git a/lxd/cluster/heartbeat_test.go b/lxd/cluster/heartbeat_test.go
new file mode 100644
index 000000000..7b8bf91ff
--- /dev/null
+++ b/lxd/cluster/heartbeat_test.go
@@ -0,0 +1,197 @@
+package cluster_test
+
+import (
+	"net/http"
+	"net/http/httptest"
+	"testing"
+	"time"
+
+	"github.com/lxc/lxd/lxd/cluster"
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/state"
+	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/version"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"golang.org/x/net/context"
+)
+
+// After a heartbeat request is completed, the leader updates the heartbeat
+// timestamp column, and the serving node updates its cache of raft nodes.
+func TestHeartbeat(t *testing.T) {
+	f := heartbeatFixture{t: t}
+	defer f.Cleanup()
+
+	gateway0 := f.Bootstrap()
+	gateway1 := f.Grow()
+	f.Grow()
+
+	state0 := f.State(gateway0)
+	state1 := f.State(gateway1)
+
+	// Artificially mark all nodes as down
+	err := state0.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		nodes, err := tx.Nodes()
+		require.NoError(t, err)
+		for _, node := range nodes {
+			err := tx.NodeHeartbeat(node.Address, time.Now().Add(-time.Minute))
+			require.NoError(t, err)
+		}
+		return nil
+	})
+	require.NoError(t, err)
+
+	// Perform the heartbeat requests.
+	heartbeat := cluster.Heartbeat(gateway0, state0.Cluster)
+	ctx := context.Background()
+	heartbeat(ctx)
+
+	// The second node that initially did not know about the third, now
+	// does.
+	err = state1.Node.Transaction(func(tx *db.NodeTx) error {
+		nodes, err := tx.RaftNodes()
+		require.NoError(t, err)
+		assert.Len(t, nodes, 3)
+		return nil
+	})
+	require.NoError(t, err)
+
+	// The heartbeat timestamps of all nodes got updated
+	err = state0.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		nodes, err := tx.Nodes()
+		require.NoError(t, err)
+		for _, node := range nodes {
+			assert.False(t, node.IsDown())
+		}
+		return nil
+	})
+	require.NoError(t, err)
+}
+
+// If a certain node does not successfully respond to the heartbeat, its
+// timestamp does not get updated.
+func TestHeartbeat_MarkAsDown(t *testing.T) {
+	f := heartbeatFixture{t: t}
+	defer f.Cleanup()
+
+	gateway0 := f.Bootstrap()
+	gateway1 := f.Grow()
+
+	state0 := f.State(gateway0)
+
+	// Artificially mark all nodes as down
+	err := state0.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		nodes, err := tx.Nodes()
+		require.NoError(t, err)
+		for _, node := range nodes {
+			err := tx.NodeHeartbeat(node.Address, time.Now().Add(-time.Minute))
+			require.NoError(t, err)
+		}
+		return nil
+	})
+	require.NoError(t, err)
+
+	// Shutdown the second node and perform the heartbeat requests.
+	f.Server(gateway1).Close()
+	heartbeat := cluster.Heartbeat(gateway0, state0.Cluster)
+	ctx := context.Background()
+	heartbeat(ctx)
+
+	// The heartbeat timestamp of the second node did not get updated
+	err = state0.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		nodes, err := tx.Nodes()
+		require.NoError(t, err)
+		assert.True(t, nodes[1].IsDown())
+		return nil
+	})
+	require.NoError(t, err)
+}
+
+// Helper for testing heartbeat-related code.
+type heartbeatFixture struct {
+	t        *testing.T
+	gateways map[int]*cluster.Gateway              // node index to gateway
+	states   map[*cluster.Gateway]*state.State     // gateway to its state handle
+	servers  map[*cluster.Gateway]*httptest.Server // gateway to its HTTP server
+	cleanups []func()
+}
+
+// Bootstrap the first node of the cluster.
+func (f *heartbeatFixture) Bootstrap() *cluster.Gateway {
+	state, gateway, _ := f.node()
+
+	err := cluster.Bootstrap(state, gateway, "buzz")
+	require.NoError(f.t, err)
+
+	return gateway
+}
+
+// Grow adds a new node to the cluster.
+func (f *heartbeatFixture) Grow() *cluster.Gateway {
+	state, gateway, address := f.node()
+	name := address
+
+	target := f.gateways[0]
+	targetState := f.states[target]
+
+	nodes, err := cluster.Accept(
+		targetState, name, address, cluster.SchemaVersion, len(version.APIExtensions))
+	require.NoError(f.t, err)
+	err = cluster.Join(state, gateway, target.Cert(), name, nodes)
+	require.NoError(f.t, err)
+
+	return gateway
+}
+
+// Return the state associated with the given gateway.
+func (f *heartbeatFixture) State(gateway *cluster.Gateway) *state.State {
+	return f.states[gateway]
+}
+
+// Return the HTTP server associated with the given gateway.
+func (f *heartbeatFixture) Server(gateway *cluster.Gateway) *httptest.Server {
+	return f.servers[gateway]
+}
+
+// Create a new node, without either bootstrapping or joining it.
+//
+// Return the associated state, gateway and network address.
+func (f *heartbeatFixture) node() (*state.State, *cluster.Gateway, string) {
+	if f.gateways == nil {
+		f.gateways = make(map[int]*cluster.Gateway)
+		f.states = make(map[*cluster.Gateway]*state.State)
+		f.servers = make(map[*cluster.Gateway]*httptest.Server)
+	}
+
+	state, cleanup := state.NewTestState(f.t)
+	f.cleanups = append(f.cleanups, cleanup)
+
+	cert := shared.TestingKeyPair()
+	gateway := newGateway(f.t, state.Node, cert)
+	f.cleanups = append(f.cleanups, func() { gateway.Shutdown() })
+
+	mux := http.NewServeMux()
+	server := newServer(cert, mux)
+	f.cleanups = append(f.cleanups, server.Close)
+
+	for path, handler := range gateway.HandlerFuncs() {
+		mux.HandleFunc(path, handler)
+	}
+
+	address := server.Listener.Addr().String()
+	mf := &membershipFixtures{t: f.t, state: state}
+	mf.NetworkAddress(address)
+
+	f.gateways[len(f.gateways)] = gateway
+	f.states[gateway] = state
+	f.servers[gateway] = server
+
+	return state, gateway, address
+}
+
+func (f *heartbeatFixture) Cleanup() {
+	// Run the cleanups in reverse order
+	for i := len(f.cleanups) - 1; i >= 0; i-- {
+		f.cleanups[i]()
+	}
+}
diff --git a/lxd/cluster/membership_test.go b/lxd/cluster/membership_test.go
index 70e3ad224..83b8a5576 100644
--- a/lxd/cluster/membership_test.go
+++ b/lxd/cluster/membership_test.go
@@ -292,6 +292,15 @@ func TestJoin(t *testing.T) {
 	// Actually join the cluster.
 	err = cluster.Join(state, gateway, targetCert, "rusp", nodes)
 	require.NoError(t, err)
+
+	// The leader now returns an updated list of raft nodes.
+	nodes, err = targetGateway.RaftNodes()
+	require.NoError(t, err)
+	assert.Len(t, nodes, 2)
+	assert.Equal(t, int64(1), nodes[0].ID)
+	assert.Equal(t, targetAddress, nodes[0].Address)
+	assert.Equal(t, int64(2), nodes[1].ID)
+	assert.Equal(t, address, nodes[1].Address)
 }
 
 // Helper for setting fixtures for Bootstrap tests.
diff --git a/lxd/cluster/raft.go b/lxd/cluster/raft.go
index 20c3ac9f9..ebdbb1efd 100644
--- a/lxd/cluster/raft.go
+++ b/lxd/cluster/raft.go
@@ -207,6 +207,22 @@ func (i *raftInstance) Raft() *raft.Raft {
 	return i.raft
 }
 
+// Servers returns the servers that are currently part of the cluster.
+//
+// If this raft instance is not the leader, an error is returned.
+func (i *raftInstance) Servers() ([]raft.Server, error) {
+	if i.raft.State() != raft.Leader {
+		return nil, raft.ErrNotLeader
+	}
+	future := i.raft.GetConfiguration()
+	err := future.Error()
+	if err != nil {
+		return nil, err
+	}
+	configuration := future.Configuration()
+	return configuration.Servers, nil
+}
+
 // HandlerFunc can be used to handle HTTP requests performed against the LXD
 // API RaftEndpoint ("/internal/raft"), in order to join/leave/form the raft
 // cluster.

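The heartbeat request doubles as the raft-nodes refresh: the leader PUTs the
current list of database nodes as JSON to each cluster member, whose gateway
handler stores it with RaftNodesReplace. A sketch of the payload encoding used
by heartbeatNode (the addresses are made up for illustration):

    nodes := []db.RaftNode{
        {ID: 1, Address: "10.0.0.1:8443"},
        {ID: 2, Address: "10.0.0.2:8443"},
    }
    buffer := bytes.Buffer{}
    if err := json.NewEncoder(&buffer).Encode(nodes); err != nil {
        // Encoding []db.RaftNode should not fail; handle it anyway.
        return err
    }
    // The body is then sent in a PUT request authenticated with the
    // cluster TLS certificate, exactly as in heartbeatNode above.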
From 9342daa8ec3a66acb10f6925fec1eb831ace35c0 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 20 Oct 2017 20:56:47 +0000
Subject: [PATCH 047/227] Plug heartbeat logic into the Daemon

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_cluster_test.go       | 18 ------------------
 lxd/cluster/gateway.go        | 15 +++++++++------
 lxd/cluster/heartbeat.go      | 11 +++++++++--
 lxd/cluster/heartbeat_test.go |  4 ++--
 lxd/daemon.go                 | 32 +++++++++++++++++---------------
 5 files changed, 37 insertions(+), 43 deletions(-)

diff --git a/lxd/api_cluster_test.go b/lxd/api_cluster_test.go
index 858f2433a..27c4ac162 100644
--- a/lxd/api_cluster_test.go
+++ b/lxd/api_cluster_test.go
@@ -109,24 +109,6 @@ func TestCluster_Failover(t *testing.T) {
 	f := clusterFixture{t: t}
 	f.FormCluster(daemons)
 
-	// FIXME: here we manually update the raft_nodes table, this can be
-	//        removed when automatic database nodes updating is in place.
-	var nodes []db.RaftNode
-	state := daemons[0].State()
-	err := state.Node.Transaction(func(tx *db.NodeTx) error {
-		var err error
-		nodes, err = tx.RaftNodes()
-		return err
-	})
-	require.NoError(t, err)
-	for _, daemon := range daemons[1:] {
-		state := daemon.State()
-		err := state.Node.Transaction(func(tx *db.NodeTx) error {
-			return tx.RaftNodesReplace(nodes)
-		})
-		require.NoError(t, err)
-	}
-
 	require.NoError(t, daemons[0].Stop())
 
 	for i, daemon := range daemons[1:] {
diff --git a/lxd/cluster/gateway.go b/lxd/cluster/gateway.go
index 826b11aec..243ec27b4 100644
--- a/lxd/cluster/gateway.go
+++ b/lxd/cluster/gateway.go
@@ -146,23 +146,26 @@ func (g *Gateway) Dialer() grpcsql.Dialer {
 			return g.memoryDial()
 		}
 
-		// Network connection.
-		addresses, err := g.cachedRaftNodes()
-		if err != nil {
-			return nil, err
-		}
-
 		// FIXME: timeout should be configurable
+		var err error
 		remaining := 10 * time.Second
 		for remaining > 0 {
+			// Network connection.
+			addresses, dbErr := g.cachedRaftNodes()
+			if dbErr != nil {
+				return nil, dbErr
+			}
+
 			for _, address := range addresses {
 				var conn *grpc.ClientConn
 				conn, err = grpcNetworkDial(address, g.cert, time.Second)
 				if err == nil {
 					return conn, nil
 				}
+				logger.Debugf("Failed to establish gRPC connection with %s: %v", address, err)
 			}
 			time.Sleep(250 * time.Millisecond)
+			remaining -= 250 * time.Millisecond
 		}
 		return nil, err
 	}
diff --git a/lxd/cluster/heartbeat.go b/lxd/cluster/heartbeat.go
index 6cf302773..798d81bd9 100644
--- a/lxd/cluster/heartbeat.go
+++ b/lxd/cluster/heartbeat.go
@@ -22,12 +22,13 @@ import (
 //
 // It will update the heartbeat timestamp column of the nodes table
 // accordingly, and also notify them of the current list of database nodes.
-func Heartbeat(gateway *Gateway, cluster *db.Cluster) task.Func {
+func Heartbeat(gateway *Gateway, cluster *db.Cluster) (task.Func, task.Schedule) {
 	heartbeat := func(ctx context.Context) {
 		if gateway.server == nil || gateway.memoryDial != nil {
 			// We're not a raft node or we're not clustered
 			return
 		}
+		logger.Debugf("Starting heartbeat round")
 
 		raftNodes, err := gateway.currentRaftNodes()
 		if err == raft.ErrNotLeader {
@@ -51,7 +52,10 @@ func Heartbeat(gateway *Gateway, cluster *db.Cluster) task.Func {
 				defer wg.Done()
 				err := heartbeatNode(ctx, address, gateway.cert, raftNodes)
 				if err == nil {
+					logger.Debugf("Successful heartbeat for %s", address)
 					heartbeats[i] = time.Now()
+				} else {
+					logger.Debugf("Failed heartbeat for %s: %v", address, err)
 				}
 			}(i, node.Address)
 		}
@@ -78,7 +82,10 @@ func Heartbeat(gateway *Gateway, cluster *db.Cluster) task.Func {
 			logger.Warnf("Failed to update heartbeat: %v", err)
 		}
 	}
-	return heartbeat
+
+	schedule := task.Every(3 * time.Second)
+
+	return heartbeat, schedule
 }
 
 // Perform a single heartbeat request against the node with the given address.
diff --git a/lxd/cluster/heartbeat_test.go b/lxd/cluster/heartbeat_test.go
index 7b8bf91ff..d129264d5 100644
--- a/lxd/cluster/heartbeat_test.go
+++ b/lxd/cluster/heartbeat_test.go
@@ -42,7 +42,7 @@ func TestHeartbeat(t *testing.T) {
 	require.NoError(t, err)
 
 	// Perform the heartbeat requests.
-	heartbeat := cluster.Heartbeat(gateway0, state0.Cluster)
+	heartbeat, _ := cluster.Heartbeat(gateway0, state0.Cluster)
 	ctx := context.Background()
 	heartbeat(ctx)
 
@@ -93,7 +93,7 @@ func TestHeartbeat_MarkAsDown(t *testing.T) {
 
 	// Shutdown the second node and perform the heartbeat requests.
 	f.Server(gateway1).Close()
-	heartbeat := cluster.Heartbeat(gateway0, state0.Cluster)
+	heartbeat, _ := cluster.Heartbeat(gateway0, state0.Cluster)
 	ctx := context.Background()
 	heartbeat(ctx)
 
diff --git a/lxd/daemon.go b/lxd/daemon.go
index 15e619d42..84d30b51c 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -509,25 +509,27 @@ func (d *Daemon) init() error {
 }
 
 func (d *Daemon) Ready() error {
-	/* Prune images */
-	d.taskPruneImages = d.tasks.Add(pruneExpiredImagesTask(d))
-
-	/* Auto-update images */
-	d.taskAutoUpdate = d.tasks.Add(autoUpdateImagesTask(d))
+	/* Heartbeats */
+	d.tasks.Add(cluster.Heartbeat(d.gateway, d.cluster))
+
+	// FIXME: There's no hard reason for which we should not run these
+	//        tasks in mock mode. However it requires that we tweak them so
+	//        they exit gracefully without blocking (something we should do
+	//        anyways) and they don't hit the internet or similar. Support
+	//        for proper cancellation is something that has been started
+	//        but has not been fully completed.
+	if !d.os.MockMode {
+		d.taskPruneImages = d.tasks.Add(pruneExpiredImagesTask(d))
 
-	/* Auto-update instance types */
-	d.tasks.Add(instanceRefreshTypesTask(d))
+		/* Auto-update images */
+		d.taskAutoUpdate = d.tasks.Add(autoUpdateImagesTask(d))
 
-	// FIXME: There's no hard reason for which we should not run tasks in
-	//        mock mode. However it requires that we tweak the tasks so
-	//        they exit gracefully without blocking (something we should
-	//        do anyways) and they don't hit the internet or similar. Support
-	//        for proper cancellation is something that has been started but
-	//        has not been fully completed.
-	if !d.os.MockMode {
-		d.tasks.Start()
+		/* Auto-update instance types */
+		d.tasks.Add(instanceRefreshTypesTask(d))
 	}
 
+	d.tasks.Start()
+
 	s := d.State()
 
 	/* Restore containers */

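Since Heartbeat now returns a (task.Func, task.Schedule) pair, callers either
hand it to the task scheduler or, as the updated tests do, discard the
schedule and run one round synchronously (a fragment, names from the patch):

    // In the daemon: registered to run every 3 seconds.
    d.tasks.Add(cluster.Heartbeat(d.gateway, d.cluster))

    // In tests: trigger a single round by hand.
    heartbeat, _ := cluster.Heartbeat(gateway0, state0.Cluster)
    heartbeat(context.Background())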
From 7c8192800230635bcda2b111e4068cd44b040607 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 20 Oct 2017 21:34:02 +0000
Subject: [PATCH 048/227] Add test for joining a cluster interactively with lxd
 init

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/main_init_test.go | 40 +++++++++++++++++++++++++++++++++++++++-
 1 file changed, 39 insertions(+), 1 deletion(-)

diff --git a/lxd/main_init_test.go b/lxd/main_init_test.go
index 66e37810d..13af4cf48 100644
--- a/lxd/main_init_test.go
+++ b/lxd/main_init_test.go
@@ -153,6 +153,36 @@ func (suite *cmdInitTestSuite) TestCmdInit_InteractiveClustering() {
 	suite.Req.True(shared.PathExists(certfile))
 }
 
+// Enable clustering interactively, joining an existing cluster.
+func (suite *cmdInitTestSuite) TestCmdInit_InteractiveClusteringJoin() {
+	leader, cleanup := newDaemon(suite.T())
+	defer cleanup()
+
+	f := clusterFixture{t: suite.T()}
+	f.FormCluster([]*Daemon{leader})
+
+	suite.command.PasswordReader = func(int) ([]byte, error) {
+		return []byte("sekret"), nil
+	}
+	port, err := shared.AllocatePort()
+	suite.Req.Nil(err)
+	answers := &cmdInitAnswers{
+		WantClustering:           true,
+		ClusterName:              "rusp",
+		ClusterAddress:           fmt.Sprintf("127.0.0.1:%d", port),
+		WantJoinCluster:          true,
+		ClusterTargetNodeAddress: leader.endpoints.NetworkAddress(),
+		ClusterAcceptFingerprint: true,
+		ClusterConfirmLosingData: true,
+	}
+	answers.Render(suite.streams)
+
+	suite.Req.Nil(suite.command.Run())
+	state := suite.d.State()
+	certfile := filepath.Join(state.OS.VarDir, "cluster.crt")
+	suite.Req.True(shared.PathExists(certfile))
+}
+
 // Pass network address and trust password via command line arguments.
 func (suite *cmdInitTestSuite) TestCmdInit_AutoHTTPSAddressAndTrustPassword() {
 	port, err := shared.AllocatePort()
@@ -716,9 +746,12 @@ func (suite *cmdInitTestSuite) TestCmdInit_ProfilesPreseedUpdate() {
 // sequence of answers.
 type cmdInitAnswers struct {
 	WantClustering           bool
-	WantJoinCluster          bool
 	ClusterName              string
 	ClusterAddress           string
+	WantJoinCluster          bool
+	ClusterTargetNodeAddress string
+	ClusterAcceptFingerprint bool
+	ClusterConfirmLosingData bool
 	WantStoragePool          bool
 	WantAvailableOverNetwork bool
 	BindToAddress            string
@@ -738,6 +771,11 @@ func (answers *cmdInitAnswers) Render(streams *cmd.MemoryStreams) {
 		streams.InputAppendLine(answers.ClusterName)
 		streams.InputAppendLine(answers.ClusterAddress)
 		streams.InputAppendBoolAnswer(answers.WantJoinCluster)
+		if answers.WantJoinCluster {
+			streams.InputAppendLine(answers.ClusterTargetNodeAddress)
+			streams.InputAppendBoolAnswer(answers.ClusterAcceptFingerprint)
+			streams.InputAppendBoolAnswer(answers.ClusterConfirmLosingData)
+		}
 	}
 	streams.InputAppendBoolAnswer(answers.WantStoragePool)
 	if !answers.WantClustering {

From 2bd9d9b2289cb61ee1d7b96a4de16170fbfcc045 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Sat, 21 Oct 2017 09:04:31 +0000
Subject: [PATCH 049/227] Notify other nodes of config changes received via
 REST API

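When a PUT to /1.0 changes the server configuration on one node, that
node now fans the change out to all other cluster nodes. The fan-out
requests carry a "lxd-cluster-notifier" User-Agent header; a node
receiving such a request skips the normal config update (the values
are already replicated through the cluster database) and only runs the
triggers that react to changed keys (proxy, MAAS, macaroon endpoint,
task timers). A minimal sketch of the sending side, mirroring
doApi10Update (the surrounding daemon wiring is assumed):

    notifier, err := cluster.NewNotifier(d.State(), d.endpoints.NetworkCert(), cluster.NotifyAlive)
    if err != nil {
            return SmartError(err)
    }
    err = notifier(func(client lxd.ContainerServer) error {
            // Each client targets one peer; push the changed keys to it.
            server, etag, err := client.GetServer()
            if err != nil {
                    return err
            }
            put := server.Writable()
            put.Config["core.proxy_https"] = "http://example.com:3128" // example changed key
            return client.UpdateServer(put, etag)
    })
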
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_1.0.go           | 76 ++++++++++++++++++++++++++++++++++++++++--------
 lxd/api_cluster_test.go  | 14 +++++++++
 lxd/daemon.go            |  8 ++++-
 lxd/db/db.go             |  1 +
 lxd/endpoints/network.go |  8 +++++
 5 files changed, 94 insertions(+), 13 deletions(-)

diff --git a/lxd/api_1.0.go b/lxd/api_1.0.go
index 36e1fd62f..54602c6c3 100644
--- a/lxd/api_1.0.go
+++ b/lxd/api_1.0.go
@@ -6,6 +6,7 @@ import (
 
 	"gopkg.in/lxc/go-lxc.v2"
 
+	lxd "github.com/lxc/lxd/client"
 	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/config"
 	"github.com/lxc/lxd/lxd/db"
@@ -166,20 +167,43 @@ func api10Get(d *Daemon, r *http.Request) Response {
 }
 
 func api10Put(d *Daemon, r *http.Request) Response {
+	req := api.ServerPut{}
+	if err := shared.ReadToJSON(r.Body, &req); err != nil {
+		return BadRequest(err)
+	}
+
+	// If this is a notification from a cluster node, just run the triggers
+	// for reacting to the values that changed.
+	if r.Header.Get("User-Agent") == "lxd-cluster-notifier" {
+		changed := make(map[string]string)
+		for key, value := range req.Config {
+			changed[key] = value.(string)
+		}
+		var config *cluster.Config
+		err := d.cluster.Transaction(func(tx *db.ClusterTx) error {
+			var err error
+			config, err = cluster.ConfigLoad(tx)
+			return err
+		})
+		if err != nil {
+			return SmartError(err)
+		}
+		err = doApi10UpdateTriggers(d, changed, config)
+		if err != nil {
+			return SmartError(err)
+		}
+		return EmptySyncResponse
+	}
+
 	render, err := daemonConfigRender(d.State())
 	if err != nil {
-		return InternalError(err)
+		return SmartError(err)
 	}
 	err = util.EtagCheck(r, render)
 	if err != nil {
 		return PreconditionFailed(err)
 	}
 
-	req := api.ServerPut{}
-	if err := shared.ReadToJSON(r.Body, &req); err != nil {
-		return BadRequest(err)
-	}
-
 	return doApi10Update(d, req, false)
 }
 
@@ -261,6 +285,35 @@ func doApi10Update(d *Daemon, req api.ServerPut, patch bool) Response {
 		}
 	}
 
+	notifier, err := cluster.NewNotifier(d.State(), d.endpoints.NetworkCert(), cluster.NotifyAlive)
+	if err != nil {
+		return SmartError(err)
+	}
+	err = notifier(func(client lxd.ContainerServer) error {
+		server, etag, err := client.GetServer()
+		if err != nil {
+			return err
+		}
+		serverPut := server.Writable()
+		serverPut.Config = make(map[string]interface{})
+		for key, value := range changed {
+			serverPut.Config[key] = value
+		}
+		return client.UpdateServer(serverPut, etag)
+	})
+	if err != nil {
+		return SmartError(err)
+	}
+
+	err = doApi10UpdateTriggers(d, changed, newConfig)
+	if err != nil {
+		return SmartError(err)
+	}
+
+	return EmptySyncResponse
+}
+
+func doApi10UpdateTriggers(d *Daemon, changed map[string]string, config *cluster.Config) error {
 	maasControllerChanged := false
 	for key, value := range changed {
 		switch key {
@@ -269,7 +322,7 @@ func doApi10Update(d *Daemon, req api.ServerPut, patch bool) Response {
 		case "core.proxy_https":
 			fallthrough
 		case "core.proxy_ignore_hosts":
-			daemonConfigSetProxy(d, newConfig)
+			daemonConfigSetProxy(d, config)
 		case "maas.api.url":
 			fallthrough
 		case "maas.api.key":
@@ -279,7 +332,7 @@ func doApi10Update(d *Daemon, req api.ServerPut, patch bool) Response {
 		case "core.macaroon.endpoint":
 			err := d.setupExternalAuthentication(value)
 			if err != nil {
-				return SmartError(err)
+				return err
 			}
 		case "images.auto_update_interval":
 			d.taskAutoUpdate.Reset()
@@ -288,14 +341,13 @@ func doApi10Update(d *Daemon, req api.ServerPut, patch bool) Response {
 		}
 	}
 	if maasControllerChanged {
-		url, key, machine := newConfig.MAASController()
+		url, key, machine := config.MAASController()
 		err := d.setupMAASController(url, key, machine)
 		if err != nil {
-			return SmartError(err)
+			return err
 		}
 	}
-
-	return EmptySyncResponse
+	return nil
 }
 
 var api10Cmd = Command{name: "", untrustedGet: true, get: api10Get, put: api10Put, patch: api10Patch}
diff --git a/lxd/api_cluster_test.go b/lxd/api_cluster_test.go
index 27c4ac162..02f5e7fc4 100644
--- a/lxd/api_cluster_test.go
+++ b/lxd/api_cluster_test.go
@@ -68,6 +68,18 @@ func TestCluster_Join(t *testing.T) {
 		return nil
 	})
 	require.NoError(t, err)
+
+	// Changing the configuration on the second node also updates it on the
+	// first, via internal notifications.
+	server, _, err := client.GetServer()
+	require.NoError(t, err)
+	serverPut := server.Writable()
+	serverPut.Config["core.macaroon.endpoint"] = "foo.bar"
+	require.NoError(t, client.UpdateServer(serverPut, ""))
+
+	for _, daemon := range daemons {
+		assert.NotNil(t, daemon.externalAuth)
+	}
 }
 
 // If the wrong trust password is given, the join request fails.
@@ -112,12 +124,14 @@ func TestCluster_Failover(t *testing.T) {
 	require.NoError(t, daemons[0].Stop())
 
 	for i, daemon := range daemons[1:] {
+		t.Logf("Invoking GetServer API against daemon %d", i)
 		client := f.ClientUnix(daemon)
 		server, _, err := client.GetServer()
 		require.NoError(f.t, err)
 		serverPut := server.Writable()
 		serverPut.Config["core.trust_password"] = fmt.Sprintf("sekret-%d", i)
 
+		t.Logf("Invoking UpdateServer API against daemon %d", i)
 		require.NoError(f.t, client.UpdateServer(serverPut, ""))
 	}
 }
diff --git a/lxd/daemon.go b/lxd/daemon.go
index 84d30b51c..15594c733 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -139,11 +139,17 @@ func (d *Daemon) checkTrustedClient(r *http.Request) error {
 		return err
 	}
 
+	// Add the server or cluster certificate to the list of trusted ones.
+	cert, _ := x509.ParseCertificate(d.endpoints.NetworkCert().KeyPair().Certificate[0])
+	certs := d.clientCerts
+	certs = append(certs, *cert)
+
 	for i := range r.TLS.PeerCertificates {
-		if util.CheckTrustState(*r.TLS.PeerCertificates[i], d.clientCerts) {
+		if util.CheckTrustState(*r.TLS.PeerCertificates[i], certs) {
 			return nil
 		}
 	}
+
 	return fmt.Errorf("unauthorized")
 }
 
diff --git a/lxd/db/db.go b/lxd/db/db.go
index 420257762..e21c429aa 100644
--- a/lxd/db/db.go
+++ b/lxd/db/db.go
@@ -195,6 +195,7 @@ func (c *Cluster) Transaction(f func(*ClusterTx) error) error {
 			// FIXME: we should bubble errors using errors.Wrap()
 			// instead, and check for sql.ErrBadConnection.
 			if strings.Contains(err.Error(), "bad connection") {
+				logger.Debugf("Retry failed transaction")
 				time.Sleep(time.Second)
 				continue
 			}
diff --git a/lxd/endpoints/network.go b/lxd/endpoints/network.go
index 6d6ddb42d..b965c50b3 100644
--- a/lxd/endpoints/network.go
+++ b/lxd/endpoints/network.go
@@ -31,6 +31,14 @@ func (e *Endpoints) NetworkPrivateKey() []byte {
 	return e.cert.PrivateKey()
 }
 
+// NetworkCert returns the full TLS certificate information for this endpoint.
+func (e *Endpoints) NetworkCert() *shared.CertInfo {
+	e.mu.RLock()
+	defer e.mu.RUnlock()
+
+	return e.cert
+}
+
 // NetworkAddress returns the network address of the network endpoint, or an
 // empty string if there's no network endpoint.
 func (e *Endpoints) NetworkAddress() string {

From 3e9410c1464a9a20d812a74478161e504f6247ee Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 23 Oct 2017 11:09:05 +0000
Subject: [PATCH 050/227] Add networks and networks_config table to cluster db

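This adds the networks and networks_config tables to the cluster
database schema, mirroring the ones in the node-local database. The
cluster database is now also opened with the go-sqlite3
"_foreign_keys=1" DSN parameter, so the ON DELETE CASCADE clause on
networks_config is actually enforced (SQLite ignores foreign keys by
default). A self-contained sketch of the effect, assuming only the
mattn/go-sqlite3 driver (error handling elided for brevity):

    package main

    import (
            "database/sql"
            "fmt"

            _ "github.com/mattn/go-sqlite3"
    )

    func main() {
            db, _ := sql.Open("sqlite3", ":memory:?_foreign_keys=1")
            defer db.Close()

            db.Exec("CREATE TABLE networks (id INTEGER PRIMARY KEY)")
            db.Exec(`CREATE TABLE networks_config (
                id INTEGER PRIMARY KEY,
                network_id INTEGER NOT NULL,
                FOREIGN KEY (network_id) REFERENCES networks (id) ON DELETE CASCADE)`)
            db.Exec("INSERT INTO networks VALUES (1)")
            db.Exec("INSERT INTO networks_config VALUES (1, 1)")

            // Deleting the parent network cascades to its config rows.
            db.Exec("DELETE FROM networks")

            var n int
            db.QueryRow("SELECT COUNT(*) FROM networks_config").Scan(&n)
            fmt.Println(n) // prints 0
    }
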
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/cluster/open.go        |  2 +-
 lxd/db/cluster/schema.go      | 14 +++++++++++
 lxd/db/cluster/update.go      | 14 +++++++++++
 lxd/db/cluster/update_test.go | 57 ++++++++++++++++++++++++++++++++++++++++---
 lxd/db/schema/schema.go       |  2 +-
 5 files changed, 83 insertions(+), 6 deletions(-)

diff --git a/lxd/db/cluster/open.go b/lxd/db/cluster/open.go
index f9b3139e7..03fe5ece9 100644
--- a/lxd/db/cluster/open.go
+++ b/lxd/db/cluster/open.go
@@ -29,7 +29,7 @@ func Open(name string, dialer grpcsql.Dialer) (*sql.DB, error) {
 	if name == "" {
 		name = "db.bin"
 	}
-	db, err := sql.Open(driverName, name)
+	db, err := sql.Open(driverName, name+"?_foreign_keys=1")
 	if err != nil {
 		return nil, fmt.Errorf("cannot open cluster database: %v", err)
 	}
diff --git a/lxd/db/cluster/schema.go b/lxd/db/cluster/schema.go
index 76302fbf7..3354e36b2 100644
--- a/lxd/db/cluster/schema.go
+++ b/lxd/db/cluster/schema.go
@@ -12,6 +12,20 @@ CREATE TABLE config (
     value TEXT,
     UNIQUE (key)
 );
+CREATE TABLE networks (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    name VARCHAR(255) NOT NULL,
+    description TEXT,
+    UNIQUE (name)
+);
+CREATE TABLE networks_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    network_id INTEGER NOT NULL,
+    key VARCHAR(255) NOT NULL,
+    value TEXT,
+    UNIQUE (network_id, key),
+    FOREIGN KEY (network_id) REFERENCES networks (id) ON DELETE CASCADE
+);
 CREATE TABLE nodes (
     id INTEGER PRIMARY KEY,
     name TEXT NOT NULL,
diff --git a/lxd/db/cluster/update.go b/lxd/db/cluster/update.go
index 33006db06..967ac40f4 100644
--- a/lxd/db/cluster/update.go
+++ b/lxd/db/cluster/update.go
@@ -36,6 +36,20 @@ CREATE TABLE config (
     value TEXT,
     UNIQUE (key)
 );
+CREATE TABLE networks (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    name VARCHAR(255) NOT NULL,
+    description TEXT,
+    UNIQUE (name)
+);
+CREATE TABLE networks_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    network_id INTEGER NOT NULL,
+    key VARCHAR(255) NOT NULL,
+    value TEXT,
+    UNIQUE (network_id, key),
+    FOREIGN KEY (network_id) REFERENCES networks (id) ON DELETE CASCADE
+);
 `
 	_, err := tx.Exec(stmt)
 	return err
diff --git a/lxd/db/cluster/update_test.go b/lxd/db/cluster/update_test.go
index f637f5083..f56f63e11 100644
--- a/lxd/db/cluster/update_test.go
+++ b/lxd/db/cluster/update_test.go
@@ -5,6 +5,7 @@ import (
 	"time"
 
 	"github.com/lxc/lxd/lxd/db/cluster"
+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
 
@@ -13,19 +14,22 @@ func TestUpdateFromV0(t *testing.T) {
 	db, err := schema.ExerciseUpdate(1, nil)
 	require.NoError(t, err)
 
-	_, err = db.Exec("INSERT INTO nodes VALUES (1, 'foo', 'blah', '1.2.3.4:666', 1, 32, ?)", time.Now())
+	stmt := "INSERT INTO nodes VALUES (1, 'foo', 'blah', '1.2.3.4:666', 1, 32, ?)"
+	_, err = db.Exec(stmt, time.Now())
 	require.NoError(t, err)
 
 	// Unique constraint on name
-	_, err = db.Exec("INSERT INTO nodes VALUES (2, 'foo', 'gosh', '5.6.7.8:666', 5, 20, ?)", time.Now())
+	stmt = "INSERT INTO nodes VALUES (2, 'foo', 'gosh', '5.6.7.8:666', 5, 20, ?)"
+	_, err = db.Exec(stmt, time.Now())
 	require.Error(t, err)
 
 	// Unique constraint on address
-	_, err = db.Exec("INSERT INTO nodes VALUES (3, 'bar', 'gasp', '1.2.3.4:666', 9, 11), ?)", time.Now())
+	stmt = "INSERT INTO nodes VALUES (3, 'bar', 'gasp', '1.2.3.4:666', 9, 11), ?)"
+	_, err = db.Exec(stmt, time.Now())
 	require.Error(t, err)
 }
 
-func TestUpdateFromV1(t *testing.T) {
+func TestUpdateFromV1_Config(t *testing.T) {
 	schema := cluster.Schema()
 	db, err := schema.ExerciseUpdate(2, nil)
 	require.NoError(t, err)
@@ -37,3 +41,48 @@ func TestUpdateFromV1(t *testing.T) {
 	_, err = db.Exec("INSERT INTO config VALUES (2, 'foo', 'gosh')")
 	require.Error(t, err)
 }
+
+func TestUpdateFromV1_Network(t *testing.T) {
+	schema := cluster.Schema()
+	db, err := schema.ExerciseUpdate(2, nil)
+	require.NoError(t, err)
+
+	_, err = db.Exec("INSERT INTO networks VALUES (1, 'foo', 'blah')")
+	require.NoError(t, err)
+
+	// Unique constraint on name.
+	_, err = db.Exec("INSERT INTO networks VALUES (2, 'foo', 'gosh')")
+	require.Error(t, err)
+}
+
+func TestUpdateFromV1_NetworkConfig(t *testing.T) {
+	schema := cluster.Schema()
+	db, err := schema.ExerciseUpdate(2, nil)
+	require.NoError(t, err)
+
+	_, err = db.Exec("INSERT INTO networks VALUES (1, 'foo', 'blah')")
+	require.NoError(t, err)
+
+	_, err = db.Exec("INSERT INTO networks_config VALUES (1, 1, 'bar', 'baz')")
+	require.NoError(t, err)
+
+	// Unique constraint on network_id/key.
+	_, err = db.Exec("INSERT INTO networks_config VALUES (2, 1, 'bar', 'egg')")
+	require.Error(t, err)
+
+	// Reference constraint on network_id.
+	_, err = db.Exec("INSERT INTO networks_config VALUES (3, , 'fuz', 'buz')")
+	require.Error(t, err)
+
+	// Cascade deletes
+	result, err := db.Exec("DELETE FROM networks")
+	require.NoError(t, err)
+	n, err := result.RowsAffected()
+	require.NoError(t, err)
+	assert.Equal(t, int64(1), n)
+	result, err = db.Exec("DELETE FROM networks_config")
+	require.NoError(t, err)
+	n, err = result.RowsAffected()
+	require.NoError(t, err)
+	assert.Equal(t, int64(0), n) // The row was already deleted by the previous query
+}
diff --git a/lxd/db/schema/schema.go b/lxd/db/schema/schema.go
index ba798984c..7015977c4 100644
--- a/lxd/db/schema/schema.go
+++ b/lxd/db/schema/schema.go
@@ -226,7 +226,7 @@ func (s *Schema) Trim(version int) []Update {
 // inspection of the resulting state.
 func (s *Schema) ExerciseUpdate(version int, hook func(*sql.DB)) (*sql.DB, error) {
 	// Create an in-memory database.
-	db, err := sql.Open("sqlite3", ":memory:")
+	db, err := sql.Open("sqlite3", ":memory:?_foreign_keys=1")
 	if err != nil {
 		return nil, fmt.Errorf("failed to open memory database: %v", err)
 	}

From c82b9bafbfaff1de7bf41105316398b01c0f8502 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 23 Oct 2017 12:29:33 +0000
Subject: [PATCH 051/227] Migrate networks data from node to cluster database

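Networks are the first model table to be moved out of the node-local
database. LoadPreClusteringData now dumps every table listed in
preClusteringTables (config, networks, networks_config) from the node
database, and ImportPreClusteringData replays the dump into the
cluster database, converting []byte values to strings on the way
(safe, since the pre-clustering schema only used TEXT columns). A
rough sketch of the migration flow, with error handling elided (tx is
a transaction against the node database, c a *db.Cluster):

    dump, err := db.LoadPreClusteringData(tx)
    if err != nil {
            return err
    }
    err = c.ImportPreClusteringData(dump)
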
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/migration.go            | 22 ++++++++++++++++-----
 lxd/db/migration_test.go       | 22 ++++++++++++++++++++-
 lxd/db/networks.go             | 44 +++++++++++++++++++++---------------------
 lxd/db/node/schema.go          | 14 --------------
 lxd/db/node/update.go          |  3 +++
 lxd/db/node/update_test.go     | 30 +++++++++++++++++++++++++++-
 lxd/devices.go                 |  2 +-
 lxd/networks.go                | 26 ++++++++++++-------------
 lxd/networks_utils.go          | 10 +++++-----
 lxd/patches.go                 |  4 ++--
 test/includes/lxd.sh           |  8 ++++++--
 test/suites/database_update.sh |  4 ++--
 12 files changed, 121 insertions(+), 68 deletions(-)

diff --git a/lxd/db/migration.go b/lxd/db/migration.go
index af9284d10..737d3b11b 100644
--- a/lxd/db/migration.go
+++ b/lxd/db/migration.go
@@ -18,14 +18,11 @@ import (
 // (regardless of whether clustering is actually on or off).
 func LoadPreClusteringData(tx *sql.Tx) (*Dump, error) {
 	// Dump all tables.
-	tables := []string{
-		"config",
-	}
 	dump := &Dump{
 		Schema: map[string][]string{},
 		Data:   map[string][][]interface{}{},
 	}
-	for _, table := range tables {
+	for _, table := range preClusteringTables {
 		data := [][]interface{}{}
 		stmt := fmt.Sprintf("SELECT * FROM %s", table)
 		rows, err := tx.Query(stmt)
@@ -68,10 +65,19 @@ func (c *Cluster) ImportPreClusteringData(dump *Dump) error {
 		return errors.Wrap(err, "failed to start cluster database transaction")
 	}
 
-	for table, columns := range dump.Schema {
+	for _, table := range preClusteringTables {
+		columns := dump.Schema[table]
 		stmt := fmt.Sprintf("INSERT INTO %s(%s)", table, strings.Join(columns, ", "))
 		stmt += fmt.Sprintf(" VALUES %s", query.Params(len(columns)))
 		for i, row := range dump.Data[table] {
+			for i, element := range row {
+				// Convert []byte columns to string. This is safe to do since
+				// the pre-clustering schema only had TEXT fields and no BLOB.
+				bytes, ok := element.([]byte)
+				if ok {
+					row[i] = string(bytes)
+				}
+			}
 			result, err := tx.Exec(stmt, row...)
 			if err != nil {
 				return errors.Wrapf(err, "failed to insert row %d into %s", i, table)
@@ -99,3 +105,9 @@ type Dump struct {
 	// of interfaces.
 	Data map[string][][]interface{}
 }
+
+var preClusteringTables = []string{
+	"config",
+	"networks",
+	"networks_config",
+}
diff --git a/lxd/db/migration_test.go b/lxd/db/migration_test.go
index 0719a8f61..ac201ace4 100644
--- a/lxd/db/migration_test.go
+++ b/lxd/db/migration_test.go
@@ -15,10 +15,17 @@ func TestLoadPreClusteringData(t *testing.T) {
 	dump, err := db.LoadPreClusteringData(tx)
 	require.NoError(t, err)
 
+	// config
 	assert.Equal(t, []string{"id", "key", "value"}, dump.Schema["config"])
 	assert.Len(t, dump.Data["config"], 1)
 	rows := []interface{}{int64(1), []byte("core.https_address"), []byte("1.2.3.4:666")}
 	assert.Equal(t, rows, dump.Data["config"][0])
+
+	// networks
+	assert.Equal(t, []string{"id", "name", "description"}, dump.Schema["networks"])
+	assert.Len(t, dump.Data["networks"], 1)
+	rows = []interface{}{int64(1), []byte("lxcbr0"), []byte("LXD bridge")}
+	assert.Equal(t, rows, dump.Data["networks"][0])
 }
 
 func TestImportPreClusteringData(t *testing.T) {
@@ -33,13 +40,24 @@ func TestImportPreClusteringData(t *testing.T) {
 	err = cluster.ImportPreClusteringData(dump)
 	require.NoError(t, err)
 
-	cluster.Transaction(func(tx *db.ClusterTx) error {
+	// config
+	err = cluster.Transaction(func(tx *db.ClusterTx) error {
 		config, err := tx.Config()
 		require.NoError(t, err)
 		values := map[string]string{"core.https_address": "1.2.3.4:666"}
 		assert.Equal(t, values, config)
 		return nil
 	})
+	require.NoError(t, err)
+
+	// networks
+	networks, err := cluster.Networks()
+	require.NoError(t, err)
+	assert.Equal(t, []string{"lxcbr0"}, networks)
+	id, network, err := cluster.NetworkGet("lxcbr0")
+	require.NoError(t, err)
+	assert.Equal(t, int64(1), id)
+	assert.Equal(t, "true", network.Config["ipv4.nat"])
 }
 
 // Return a sql.Tx against a memory database populated with pre-clustering
@@ -54,6 +72,8 @@ func newPreClusteringTx(t *testing.T) *sql.Tx {
 	stmts := []string{
 		preClusteringNodeSchema,
 		"INSERT INTO config VALUES(1, 'core.https_address', '1.2.3.4:666')",
+		"INSERT INTO networks VALUES(1, 'lxcbr0', 'LXD bridge')",
+		"INSERT INTO networks_config VALUES(1, 1, 'ipv4.nat', 'true')",
 	}
 	for _, stmt := range stmts {
 		_, err := tx.Exec(stmt)
diff --git a/lxd/db/networks.go b/lxd/db/networks.go
index 66b3b5913..16406c2f9 100644
--- a/lxd/db/networks.go
+++ b/lxd/db/networks.go
@@ -10,12 +10,12 @@ import (
 	"github.com/lxc/lxd/shared/api"
 )
 
-func (n *Node) Networks() ([]string, error) {
+func (c *Cluster) Networks() ([]string, error) {
 	q := fmt.Sprintf("SELECT name FROM networks")
 	inargs := []interface{}{}
 	var name string
 	outfmt := []interface{}{name}
-	result, err := queryScan(n.db, q, inargs, outfmt)
+	result, err := queryScan(c.db, q, inargs, outfmt)
 	if err != nil {
 		return []string{}, err
 	}
@@ -28,19 +28,19 @@ func (n *Node) Networks() ([]string, error) {
 	return response, nil
 }
 
-func (n *Node) NetworkGet(name string) (int64, *api.Network, error) {
+func (c *Cluster) NetworkGet(name string) (int64, *api.Network, error) {
 	description := sql.NullString{}
 	id := int64(-1)
 
 	q := "SELECT id, description FROM networks WHERE name=?"
 	arg1 := []interface{}{name}
 	arg2 := []interface{}{&id, &description}
-	err := dbQueryRowScan(n.db, q, arg1, arg2)
+	err := dbQueryRowScan(c.db, q, arg1, arg2)
 	if err != nil {
 		return -1, nil, err
 	}
 
-	config, err := n.NetworkConfigGet(id)
+	config, err := c.NetworkConfigGet(id)
 	if err != nil {
 		return -1, nil, err
 	}
@@ -56,7 +56,7 @@ func (n *Node) NetworkGet(name string) (int64, *api.Network, error) {
 	return id, &network, nil
 }
 
-func (n *Node) NetworkGetInterface(devName string) (int64, *api.Network, error) {
+func (c *Cluster) NetworkGetInterface(devName string) (int64, *api.Network, error) {
 	id := int64(-1)
 	name := ""
 	value := ""
@@ -64,7 +64,7 @@ func (n *Node) NetworkGetInterface(devName string) (int64, *api.Network, error)
 	q := "SELECT networks.id, networks.name, networks_config.value FROM networks LEFT JOIN networks_config ON networks.id=networks_config.network_id WHERE networks_config.key=\"bridge.external_interfaces\""
 	arg1 := []interface{}{}
 	arg2 := []interface{}{id, name, value}
-	result, err := queryScan(n.db, q, arg1, arg2)
+	result, err := queryScan(c.db, q, arg1, arg2)
 	if err != nil {
 		return -1, nil, err
 	}
@@ -84,7 +84,7 @@ func (n *Node) NetworkGetInterface(devName string) (int64, *api.Network, error)
 		return -1, nil, fmt.Errorf("No network found for interface: %s", devName)
 	}
 
-	config, err := n.NetworkConfigGet(id)
+	config, err := c.NetworkConfigGet(id)
 	if err != nil {
 		return -1, nil, err
 	}
@@ -99,7 +99,7 @@ func (n *Node) NetworkGetInterface(devName string) (int64, *api.Network, error)
 	return id, &network, nil
 }
 
-func (n *Node) NetworkConfigGet(id int64) (map[string]string, error) {
+func (c *Cluster) NetworkConfigGet(id int64) (map[string]string, error) {
 	var key, value string
 	query := `
         SELECT
@@ -108,7 +108,7 @@ func (n *Node) NetworkConfigGet(id int64) (map[string]string, error) {
 		WHERE network_id=?`
 	inargs := []interface{}{id}
 	outfmt := []interface{}{key, value}
-	results, err := queryScan(n.db, query, inargs, outfmt)
+	results, err := queryScan(c.db, query, inargs, outfmt)
 	if err != nil {
 		return nil, fmt.Errorf("Failed to get network '%d'", id)
 	}
@@ -120,7 +120,7 @@ func (n *Node) NetworkConfigGet(id int64) (map[string]string, error) {
 		 */
 		query := "SELECT id FROM networks WHERE id=?"
 		var r int
-		results, err := queryScan(n.db, query, []interface{}{id}, []interface{}{r})
+		results, err := queryScan(c.db, query, []interface{}{id}, []interface{}{r})
 		if err != nil {
 			return nil, err
 		}
@@ -142,8 +142,8 @@ func (n *Node) NetworkConfigGet(id int64) (map[string]string, error) {
 	return config, nil
 }
 
-func (n *Node) NetworkCreate(name, description string, config map[string]string) (int64, error) {
-	tx, err := begin(n.db)
+func (c *Cluster) NetworkCreate(name, description string, config map[string]string) (int64, error) {
+	tx, err := begin(c.db)
 	if err != nil {
 		return -1, err
 	}
@@ -174,13 +174,13 @@ func (n *Node) NetworkCreate(name, description string, config map[string]string)
 	return id, nil
 }
 
-func (n *Node) NetworkUpdate(name, description string, config map[string]string) error {
-	id, _, err := n.NetworkGet(name)
+func (c *Cluster) NetworkUpdate(name, description string, config map[string]string) error {
+	id, _, err := c.NetworkGet(name)
 	if err != nil {
 		return err
 	}
 
-	tx, err := begin(n.db)
+	tx, err := begin(c.db)
 	if err != nil {
 		return err
 	}
@@ -242,13 +242,13 @@ func NetworkConfigClear(tx *sql.Tx, id int64) error {
 	return nil
 }
 
-func (n *Node) NetworkDelete(name string) error {
-	id, _, err := n.NetworkGet(name)
+func (c *Cluster) NetworkDelete(name string) error {
+	id, _, err := c.NetworkGet(name)
 	if err != nil {
 		return err
 	}
 
-	_, err = exec(n.db, "DELETE FROM networks WHERE id=?", id)
+	_, err = exec(c.db, "DELETE FROM networks WHERE id=?", id)
 	if err != nil {
 		return err
 	}
@@ -256,13 +256,13 @@ func (n *Node) NetworkDelete(name string) error {
 	return nil
 }
 
-func (n *Node) NetworkRename(oldName string, newName string) error {
-	id, _, err := n.NetworkGet(oldName)
+func (c *Cluster) NetworkRename(oldName string, newName string) error {
+	id, _, err := c.NetworkGet(oldName)
 	if err != nil {
 		return err
 	}
 
-	tx, err := begin(n.db)
+	tx, err := begin(c.db)
 	if err != nil {
 		return err
 	}
diff --git a/lxd/db/node/schema.go b/lxd/db/node/schema.go
index a9754eeaa..7a0511f92 100644
--- a/lxd/db/node/schema.go
+++ b/lxd/db/node/schema.go
@@ -105,20 +105,6 @@ CREATE TABLE images_source (
     alias VARCHAR(255) NOT NULL,
     FOREIGN KEY (image_id) REFERENCES images (id) ON DELETE CASCADE
 );
-CREATE TABLE networks (
-    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    name VARCHAR(255) NOT NULL,
-    description TEXT,
-    UNIQUE (name)
-);
-CREATE TABLE networks_config (
-    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    network_id INTEGER NOT NULL,
-    key VARCHAR(255) NOT NULL,
-    value TEXT,
-    UNIQUE (network_id, key),
-    FOREIGN KEY (network_id) REFERENCES networks (id) ON DELETE CASCADE
-);
 CREATE TABLE patches (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
     name VARCHAR(255) NOT NULL,
diff --git a/lxd/db/node/update.go b/lxd/db/node/update.go
index ce1dd6b85..0866bd63d 100644
--- a/lxd/db/node/update.go
+++ b/lxd/db/node/update.go
@@ -117,6 +117,9 @@ CREATE TABLE raft_nodes (
     address TEXT NOT NULL,
     UNIQUE (address)
 );
+DELETE FROM config WHERE NOT key='core.https_address';
+DROP TABLE networks_config;
+DROP TABLE networks;
 `
 	_, err := tx.Exec(stmts)
 	return err
diff --git a/lxd/db/node/update_test.go b/lxd/db/node/update_test.go
index 980ef8bf3..36d3d2136 100644
--- a/lxd/db/node/update_test.go
+++ b/lxd/db/node/update_test.go
@@ -1,13 +1,17 @@
 package node_test
 
 import (
+	"database/sql"
 	"testing"
 
 	"github.com/lxc/lxd/lxd/db/node"
+	"github.com/lxc/lxd/lxd/db/query"
+	"github.com/lxc/lxd/shared"
+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
 
-func TestUpdateFromV36(t *testing.T) {
+func TestUpdateFromV36_RaftNodes(t *testing.T) {
 	schema := node.Schema()
 	db, err := schema.ExerciseUpdate(37, nil)
 	require.NoError(t, err)
@@ -15,3 +19,27 @@ func TestUpdateFromV36(t *testing.T) {
 	_, err = db.Exec("INSERT INTO raft_nodes VALUES (1, '1.2.3.4:666')")
 	require.NoError(t, err)
 }
+
+// All model tables previously in the node database have been migrated to the
+// cluster database, and dropped from the node database.
+func TestUpdateFromV36_DropTables(t *testing.T) {
+	schema := node.Schema()
+	db, err := schema.ExerciseUpdate(37, nil)
+	require.NoError(t, err)
+
+	var current []string
+	err = query.Transaction(db, func(tx *sql.Tx) error {
+		var err error
+		stmt := "SELECT name FROM sqlite_master WHERE type='table'"
+		current, err = query.SelectStrings(tx, stmt)
+		return err
+	})
+	require.NoError(t, err)
+	deleted := []string{
+		"networks",
+		"networks_config",
+	}
+	for _, name := range deleted {
+		assert.False(t, shared.StringInSlice(name, current))
+	}
+}
diff --git a/lxd/devices.go b/lxd/devices.go
index 6a8f341f7..186dd8a52 100644
--- a/lxd/devices.go
+++ b/lxd/devices.go
@@ -847,7 +847,7 @@ func deviceEventListener(s *state.State) {
 
 			logger.Debugf("Scheduler: network: %s has been added: updating network priorities", e[0])
 			deviceNetworkPriority(s, e[0])
-			networkAutoAttach(s.Node, e[0])
+			networkAutoAttach(s.Cluster, e[0])
 		case e := <-chUSB:
 			deviceUSBEvent(s, e)
 		case e := <-deviceSchedRebalance:
diff --git a/lxd/networks.go b/lxd/networks.go
index 8a27a8b88..53a506782 100644
--- a/lxd/networks.go
+++ b/lxd/networks.go
@@ -36,7 +36,7 @@ func networksGet(d *Daemon, r *http.Request) Response {
 		recursion = 0
 	}
 
-	ifs, err := networkGetInterfaces(d.db)
+	ifs, err := networkGetInterfaces(d.cluster)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -88,7 +88,7 @@ func networksPost(d *Daemon, r *http.Request) Response {
 		return BadRequest(fmt.Errorf("Only 'bridge' type networks can be created"))
 	}
 
-	networks, err := networkGetInterfaces(d.db)
+	networks, err := networkGetInterfaces(d.cluster)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -137,7 +137,7 @@ func networksPost(d *Daemon, r *http.Request) Response {
 	}
 
 	// Create the database entry
-	_, err = d.db.NetworkCreate(req.Name, req.Description, req.Config)
+	_, err = d.cluster.NetworkCreate(req.Name, req.Description, req.Config)
 	if err != nil {
 		return InternalError(
 			fmt.Errorf("Error inserting %s into database: %s", req.Name, err))
@@ -176,7 +176,7 @@ func networkGet(d *Daemon, r *http.Request) Response {
 func doNetworkGet(d *Daemon, name string) (api.Network, error) {
 	// Get some information
 	osInfo, _ := net.InterfaceByName(name)
-	_, dbInfo, _ := d.db.NetworkGet(name)
+	_, dbInfo, _ := d.cluster.NetworkGet(name)
 
 	// Sanity check
 	if osInfo == nil && dbInfo == nil {
@@ -287,7 +287,7 @@ func networkPost(d *Daemon, r *http.Request) Response {
 	}
 
 	// Check that the name isn't already in use
-	networks, err := networkGetInterfaces(d.db)
+	networks, err := networkGetInterfaces(d.cluster)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -309,7 +309,7 @@ func networkPut(d *Daemon, r *http.Request) Response {
 	name := mux.Vars(r)["name"]
 
 	// Get the existing network
-	_, dbInfo, err := d.db.NetworkGet(name)
+	_, dbInfo, err := d.cluster.NetworkGet(name)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -334,7 +334,7 @@ func networkPatch(d *Daemon, r *http.Request) Response {
 	name := mux.Vars(r)["name"]
 
 	// Get the existing network
-	_, dbInfo, err := d.db.NetworkGet(name)
+	_, dbInfo, err := d.cluster.NetworkGet(name)
 	if dbInfo != nil {
 		return SmartError(err)
 	}
@@ -399,7 +399,7 @@ var networkCmd = Command{name: "networks/{name}", get: networkGet, delete: netwo
 
 // The network structs and functions
 func networkLoadByName(s *state.State, name string) (*network, error) {
-	id, dbInfo, err := s.Node.NetworkGet(name)
+	id, dbInfo, err := s.Cluster.NetworkGet(name)
 	if err != nil {
 		return nil, err
 	}
@@ -411,7 +411,7 @@ func networkLoadByName(s *state.State, name string) (*network, error) {
 
 func networkStartup(s *state.State) error {
 	// Get a list of managed networks
-	networks, err := s.Node.Networks()
+	networks, err := s.Cluster.Networks()
 	if err != nil {
 		return err
 	}
@@ -435,7 +435,7 @@ func networkStartup(s *state.State) error {
 
 func networkShutdown(s *state.State) error {
 	// Get a list of managed networks
-	networks, err := s.Node.Networks()
+	networks, err := s.Cluster.Networks()
 	if err != nil {
 		return err
 	}
@@ -516,7 +516,7 @@ func (n *network) Delete() error {
 	}
 
 	// Remove the network from the database
-	err := n.db.NetworkDelete(n.name)
+	err := n.state.Cluster.NetworkDelete(n.name)
 	if err != nil {
 		return err
 	}
@@ -551,7 +551,7 @@ func (n *network) Rename(name string) error {
 	}
 
 	// Rename the database entry
-	err := n.db.NetworkRename(n.name, name)
+	err := n.state.Cluster.NetworkRename(n.name, name)
 	if err != nil {
 		return err
 	}
@@ -1432,7 +1432,7 @@ func (n *network) Update(newNetwork api.NetworkPut) error {
 	n.description = newNetwork.Description
 
 	// Update the database
-	err = n.db.NetworkUpdate(n.name, n.description, n.config)
+	err = n.state.Cluster.NetworkUpdate(n.name, n.description, n.config)
 	if err != nil {
 		return err
 	}
diff --git a/lxd/networks_utils.go b/lxd/networks_utils.go
index d10b4b00e..1de47c57a 100644
--- a/lxd/networks_utils.go
+++ b/lxd/networks_utils.go
@@ -29,8 +29,8 @@ import (
 
 var networkStaticLock sync.Mutex
 
-func networkAutoAttach(db *db.Node, devName string) error {
-	_, dbInfo, err := db.NetworkGetInterface(devName)
+func networkAutoAttach(cluster *db.Cluster, devName string) error {
+	_, dbInfo, err := cluster.NetworkGetInterface(devName)
 	if err != nil {
 		// No match found, move on
 		return nil
@@ -77,8 +77,8 @@ func networkDetachInterface(netName string, devName string) error {
 	return nil
 }
 
-func networkGetInterfaces(db *db.Node) ([]string, error) {
-	networks, err := db.Networks()
+func networkGetInterfaces(cluster *db.Cluster) ([]string, error) {
+	networks, err := cluster.Networks()
 	if err != nil {
 		return nil, err
 	}
@@ -753,7 +753,7 @@ func networkUpdateStatic(s *state.State, networkName string) error {
 	var networks []string
 	if networkName == "" {
 		var err error
-		networks, err = s.Node.Networks()
+		networks, err = s.Cluster.Networks()
 		if err != nil {
 			return err
 		}
diff --git a/lxd/patches.go b/lxd/patches.go
index 3ec89394c..37eeca1f8 100644
--- a/lxd/patches.go
+++ b/lxd/patches.go
@@ -131,7 +131,7 @@ func patchInvalidProfileNames(name string, d *Daemon) error {
 
 func patchNetworkPermissions(name string, d *Daemon) error {
 	// Get the list of networks
-	networks, err := d.db.Networks()
+	networks, err := d.cluster.Networks()
 	if err != nil {
 		return err
 	}
@@ -2394,7 +2394,7 @@ func patchStorageZFSVolumeSize(name string, d *Daemon) error {
 
 func patchNetworkDnsmasqHosts(name string, d *Daemon) error {
 	// Get the list of networks
-	networks, err := d.db.Networks()
+	networks, err := d.cluster.Networks()
 	if err != nil {
 		return err
 	}
diff --git a/test/includes/lxd.sh b/test/includes/lxd.sh
index 89399b6df..b13778b55 100644
--- a/test/includes/lxd.sh
+++ b/test/includes/lxd.sh
@@ -187,8 +187,6 @@ kill_lxd() {
         check_empty_table "${daemon_dir}/lxd.db" "containers_devices"
         check_empty_table "${daemon_dir}/lxd.db" "containers_devices_config"
         check_empty_table "${daemon_dir}/lxd.db" "containers_profiles"
-        check_empty_table "${daemon_dir}/lxd.db" "networks"
-        check_empty_table "${daemon_dir}/lxd.db" "networks_config"
         check_empty_table "${daemon_dir}/lxd.db" "images"
         check_empty_table "${daemon_dir}/lxd.db" "images_aliases"
         check_empty_table "${daemon_dir}/lxd.db" "images_properties"
@@ -201,6 +199,12 @@ kill_lxd() {
         check_empty_table "${daemon_dir}/lxd.db" "storage_pools_config"
         check_empty_table "${daemon_dir}/lxd.db" "storage_volumes"
         check_empty_table "${daemon_dir}/lxd.db" "storage_volumes_config"
+
+        echo "==> Checking for leftover cluster DB entries"
+        # FIXME: we should not use the command line sqlite client, since it's
+        #        not compatible with dqlite
+        check_empty_table "${daemon_dir}/raft/db.bin" "networks"
+        check_empty_table "${daemon_dir}/raft/db.bin" "networks_config"
     fi
 
     # teardown storage
diff --git a/test/suites/database_update.sh b/test/suites/database_update.sh
index 15189bd2f..4af380d32 100644
--- a/test/suites/database_update.sh
+++ b/test/suites/database_update.sh
@@ -9,12 +9,12 @@ test_database_update(){
   spawn_lxd "${LXD_MIGRATE_DIR}" true
 
   # Assert there are enough tables.
-  expected_tables=24
+  expected_tables=22
   tables=$(sqlite3 "${MIGRATE_DB}" ".dump" | grep -c "CREATE TABLE")
   [ "${tables}" -eq "${expected_tables}" ] || { echo "FAIL: Wrong number of tables after database migration. Found: ${tables}, expected ${expected_tables}"; false; }
 
   # There should be 15 "ON DELETE CASCADE" occurrences
-  expected_cascades=15
+  expected_cascades=14
   cascades=$(sqlite3 "${MIGRATE_DB}" ".dump" | grep -c "ON DELETE CASCADE")
   [ "${cascades}" -eq "${expected_cascades}" ] || { echo "FAIL: Wrong number of ON DELETE CASCADE foreign keys. Found: ${cascades}, exected: ${expected_cascades}"; false; }
 

From e45dbe2f88907acab5b5442435e33b4973636e34 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 24 Oct 2017 11:06:28 +0000
Subject: [PATCH 052/227] Configure networks when joining an existing cluster
 node

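When joining an existing cluster, "lxd init" now fetches the managed
networks defined on the target node through the new GET /cluster
endpoint and asks the user for a node-local value of each network
config key, so that the joining node ends up defining the same
networks. A sketch of the client side (the address, certificate and
password below are placeholders):

    args := &lxd.ConnectionArgs{TLSServerCert: targetCert}
    client, err := lxd.ConnectLXD("https://10.55.60.1:8443", args)
    if err != nil {
            return err
    }
    cluster, err := client.GetCluster(trustPassword)
    if err != nil {
            return err
    }
    for _, network := range cluster.Networks {
            // Prompt for node-local values of network.Config keys,
            // then include the result in the init data.
    }
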
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 client/interfaces.go  |  1 +
 client/lxd_cluster.go | 24 +++++++++++++++++++-
 lxd/api_cluster.go    | 34 ++++++++++++++++++++++++++-
 lxd/main_init.go      | 63 ++++++++++++++++++++++++++++++++++++++++++++-------
 lxd/main_init_test.go | 21 +++++++++++++++++
 shared/api/cluster.go |  9 ++++++++
 6 files changed, 142 insertions(+), 10 deletions(-)

diff --git a/client/interfaces.go b/client/interfaces.go
index 2bb25a009..22ca2cc9f 100644
--- a/client/interfaces.go
+++ b/client/interfaces.go
@@ -162,6 +162,7 @@ type ContainerServer interface {
 	RenameStoragePoolVolume(pool string, volType string, name string, volume api.StorageVolumePost) (err error)
 
 	// Cluster functions ("cluster" API extensions)
+	GetCluster(password string) (cluster *api.Cluster, err error)
 	BootstrapCluster(name string) (op *Operation, err error)
 	AcceptNode(targetPassword, name, address string, schema, api int) (info *api.ClusterNodeAccepted, err error)
 	JoinCluster(targetAddress, targetPassword string, targetCert []byte, name string) (op *Operation, err error)
diff --git a/client/lxd_cluster.go b/client/lxd_cluster.go
index 4c8de7bad..7d153cbb5 100644
--- a/client/lxd_cluster.go
+++ b/client/lxd_cluster.go
@@ -1,6 +1,28 @@
 package lxd
 
-import "github.com/lxc/lxd/shared/api"
+import (
+	"fmt"
+
+	"github.com/lxc/lxd/shared/api"
+)
+
+// GetCluster returns information about a cluster.
+//
+// If this client is not trusted, the password must be supplied.
+func (r *ProtocolLXD) GetCluster(password string) (*api.Cluster, error) {
+	cluster := &api.Cluster{}
+	path := "/cluster"
+	if password != "" {
+		path += fmt.Sprintf("?password=%s", password)
+	}
+	_, err := r.queryStruct("GET", path, nil, "", &cluster)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return cluster, nil
+}
 
 // BootstrapCluster requests to bootstrap a new cluster.
 func (r *ProtocolLXD) BootstrapCluster(name string) (*Operation, error) {
diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index c00d763c5..28b3c46f7 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -15,7 +15,39 @@ import (
 	"github.com/pkg/errors"
 )
 
-var clusterCmd = Command{name: "cluster", untrustedPost: true, post: clusterPost}
+var clusterCmd = Command{name: "cluster", untrustedGet: true, get: clusterGet, untrustedPost: true, post: clusterPost}
+
+func clusterGet(d *Daemon, r *http.Request) Response {
+	// If the client is not trusted, check that it's presenting the trust
+	// password.
+	trusted := d.checkTrustedClient(r) == nil
+	if !trusted {
+		secret, err := cluster.ConfigGetString(d.cluster, "core.trust_password")
+		if err != nil {
+			return SmartError(err)
+		}
+		if util.PasswordCheck(secret, r.FormValue("password")) != nil {
+			return Forbidden
+		}
+	}
+
+	cluster := api.Cluster{}
+
+	// Fill the Networks attribute
+	networks, err := d.cluster.Networks()
+	if err != nil {
+		return SmartError(err)
+	}
+	for _, name := range networks {
+		_, network, err := d.cluster.NetworkGet(name)
+		if err != nil {
+			return SmartError(err)
+		}
+		cluster.Networks = append(cluster.Networks, *network)
+	}
+
+	return SyncResponse(true, cluster)
+}
 
 func clusterPost(d *Daemon, r *http.Request) Response {
 	req := api.ClusterPost{}
diff --git a/lxd/main_init.go b/lxd/main_init.go
index 4cd6a6f94..12c11c1d5 100644
--- a/lxd/main_init.go
+++ b/lxd/main_init.go
@@ -6,6 +6,7 @@ import (
 	"net"
 	"os"
 	"os/exec"
+	"sort"
 	"strconv"
 	"strings"
 	"syscall"
@@ -168,6 +169,25 @@ func (cmd *CmdInit) fillDataInteractive(data *cmdInitData, client lxd.ContainerS
 			Port:          clustering.Port,
 			TrustPassword: clustering.TrustPassword,
 		}
+		if clustering.TargetAddress != "" {
+			// Client parameters to connect to the target cluster node.
+			args := &lxd.ConnectionArgs{
+				TLSServerCert: string(clustering.TargetCert),
+			}
+			url := fmt.Sprintf("https://%s", clustering.TargetAddress)
+			client, err := lxd.ConnectLXD(url, args)
+			if err != nil {
+				return err
+			}
+			cluster, err := client.GetCluster(clustering.TargetPassword)
+			if err != nil {
+				return err
+			}
+			data.Networks, err = cmd.askClusteringNetworks(cluster)
+			if err != nil {
+				return err
+			}
+		}
 	}
 
 	_, err = exec.LookPath("dnsmasq")
@@ -426,13 +446,6 @@ func (cmd *CmdInit) apply(client lxd.ContainerServer, data *cmdInitData) error {
 		return cmd.initConfig(client, data.Config)
 	})
 
-	// Cluster changers
-	if data.Cluster.Name != "" {
-		changers = append(changers, func() (reverter, error) {
-			return cmd.initCluster(client, data.Cluster)
-		})
-	}
-
 	// Storage pool changers
 	for i := range data.Pools {
 		pool := data.Pools[i] // Local variable for the closure
@@ -457,6 +470,13 @@ func (cmd *CmdInit) apply(client lxd.ContainerServer, data *cmdInitData) error {
 		})
 	}
 
+	// Cluster changers
+	if data.Cluster.Name != "" {
+		changers = append(changers, func() (reverter, error) {
+			return cmd.initCluster(client, data.Cluster)
+		})
+	}
+
 	// Apply all changes. If anything goes wrong at any iteration
 	// of the loop, we'll try to revert any change performed in
 	// earlier iterations.
@@ -800,7 +820,7 @@ join:
 		goto join
 	}
 	digest := shared.CertFingerprint(certificate)
-	askFingerprint := fmt.Sprintf("Remote node fingerprint: %s ok (y/n)? ", digest)
+	askFingerprint := fmt.Sprintf("Remote node fingerprint: %s ok (yes/no)? ", digest)
 	if !cmd.Context.AskBool(askFingerprint, "") {
 		return nil, fmt.Errorf("Cluster certificate NACKed by user")
 	}
@@ -816,6 +836,33 @@ join:
 	return params, nil
 }
 
+func (cmd *CmdInit) askClusteringNetworks(cluster *api.Cluster) ([]api.NetworksPost, error) {
+	networks := make([]api.NetworksPost, len(cluster.Networks))
+	for i, network := range cluster.Networks {
+		if !network.Managed {
+			continue
+		}
+		post := api.NetworksPost{}
+		post.Name = network.Name
+		post.Config = network.Config
+		post.Type = network.Type
+		post.Managed = true
+		// Sort config keys to get a stable ordering (especially for tests)
+		keys := []string{}
+		for key := range post.Config {
+			keys = append(keys, key)
+		}
+		sort.Strings(keys)
+		for _, key := range keys {
+			question := fmt.Sprintf(
+				`Enter local value for key "%s" of network "%s": `, key, post.Name)
+			post.Config[key] = cmd.Context.AskString(question, "", nil)
+		}
+		networks[i] = post
+	}
+	return networks, nil
+}
+
 // Ask if the user wants to create a new storage pool, and return
 // the relevant parameters if so.
 func (cmd *CmdInit) askStorage(client lxd.ContainerServer, existingPools []string, availableBackends []string) (*cmdInitStorageParams, error) {
diff --git a/lxd/main_init_test.go b/lxd/main_init_test.go
index 13af4cf48..6eef12c19 100644
--- a/lxd/main_init_test.go
+++ b/lxd/main_init_test.go
@@ -161,6 +161,17 @@ func (suite *cmdInitTestSuite) TestCmdInit_InteractiveClusteringJoin() {
 	f := clusterFixture{t: suite.T()}
 	f.FormCluster([]*Daemon{leader})
 
+	network := api.NetworksPost{
+		Name:    "mybr",
+		Type:    "bridge",
+		Managed: true,
+	}
+	network.Config = map[string]string{
+		"ipv4.nat": "true",
+	}
+	client := f.ClientUnix(leader)
+	client.CreateNetwork(network)
+
 	suite.command.PasswordReader = func(int) ([]byte, error) {
 		return []byte("sekret"), nil
 	}
@@ -174,6 +185,12 @@ func (suite *cmdInitTestSuite) TestCmdInit_InteractiveClusteringJoin() {
 		ClusterTargetNodeAddress: leader.endpoints.NetworkAddress(),
 		ClusterAcceptFingerprint: true,
 		ClusterConfirmLosingData: true,
+		ClusterConfig: []string{
+			"10.23.189.2/24", // ipv4.address
+			"true",           // ipv4.nat
+			"aaaa:bbbb:cccc:dddd::1/64", // ipv6.address
+			"true", // ipv6.nat
+		},
 	}
 	answers.Render(suite.streams)
 
@@ -752,6 +769,7 @@ type cmdInitAnswers struct {
 	ClusterTargetNodeAddress string
 	ClusterAcceptFingerprint bool
 	ClusterConfirmLosingData bool
+	ClusterConfig            []string
 	WantStoragePool          bool
 	WantAvailableOverNetwork bool
 	BindToAddress            string
@@ -775,6 +793,9 @@ func (answers *cmdInitAnswers) Render(streams *cmd.MemoryStreams) {
 			streams.InputAppendLine(answers.ClusterTargetNodeAddress)
 			streams.InputAppendBoolAnswer(answers.ClusterAcceptFingerprint)
 			streams.InputAppendBoolAnswer(answers.ClusterConfirmLosingData)
+			for _, value := range answers.ClusterConfig {
+				streams.InputAppendLine(value)
+			}
 		}
 	}
 	streams.InputAppendBoolAnswer(answers.WantStoragePool)
diff --git a/shared/api/cluster.go b/shared/api/cluster.go
index 4f54d2ada..5000394c5 100644
--- a/shared/api/cluster.go
+++ b/shared/api/cluster.go
@@ -1,5 +1,10 @@
 package api
 
+// Cluster represents high-level information about a LXD cluster.
+type Cluster struct {
+	Networks []Network
+}
+
 // ClusterPost represents the fields required to bootstrap or join a LXD
 // cluster.
 //
@@ -16,12 +21,16 @@ type ClusterPost struct {
 }
 
 // ClusterNodeAccepted represents the response of a request to join a cluster.
+//
+// API extension: cluster
 type ClusterNodeAccepted struct {
 	RaftNodes  []RaftNode `json:"raft_nodes" yaml:"raft_nodes"`
 	PrivateKey []byte     `json:"private_key" yaml:"private_key"`
 }
 
 // RaftNode represents a LXD node that is part of the dqlite raft cluster.
+//
+// API extension: cluster
 type RaftNode struct {
 	ID      int64  `json:"id" yaml:"id"`
 	Address string `json:"address" yaml:"address"`

From 6553ae1ffba8fedc4bf5ea2048875191d43d29ae Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 24 Oct 2017 13:20:27 +0000
Subject: [PATCH 053/227] Add cluster-related db schema for networks

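A freshly-created cluster database now always contains a sentinel row
in the nodes table (id 1, name 'none', address '0.0.0.0'), so that
node-scoped rows can reference a node even before clustering is
enabled. The networks_config table gains a node_id foreign key for
per-node config values, and a new networks_nodes table associates each
network with the nodes it is defined on. EnsureSchema uses the
sentinel to detect whether the node is clustered; a paraphrase of that
check (helper names as in this patch):

    // A single nodes row with address '0.0.0.0' means clustering is off.
    n, err := selectUnclusteredNodesCount(tx)
    if err != nil {
            return errors.Wrap(err, "failed to fetch unclustered nodes count")
    }
    if n == 1 {
            address = "0.0.0.0" // keep using the sentinel for ourselves
    }
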
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_1.0.go                |  8 ++++++--
 lxd/cluster/membership.go     | 10 +++++-----
 lxd/cluster/notify.go         |  2 +-
 lxd/cluster/notify_test.go    | 25 ++++++++++++++++++++-----
 lxd/db/cluster/open.go        | 28 +++++++++++++++++++++++-----
 lxd/db/cluster/open_test.go   |  1 +
 lxd/db/cluster/query.go       |  7 ++++---
 lxd/db/cluster/schema.go      | 15 +++++++++++++--
 lxd/db/cluster/update.go      | 15 +++++++++++++--
 lxd/db/cluster/update_test.go | 42 +++++++++++++++++++++++++++++++++---------
 lxd/db/db.go                  | 24 ++++++++++++++++++++++++
 lxd/db/migration.go           | 11 ++++++++---
 lxd/db/networks.go            | 37 +++++++++++++++++++++++++------------
 lxd/db/node.go                | 16 ++++++++++++++++
 lxd/db/node_test.go           | 10 +++++-----
 lxd/main_init_test.go         |  2 +-
 16 files changed, 198 insertions(+), 55 deletions(-)

diff --git a/lxd/api_1.0.go b/lxd/api_1.0.go
index 54602c6c3..7a26e2591 100644
--- a/lxd/api_1.0.go
+++ b/lxd/api_1.0.go
@@ -335,9 +335,13 @@ func doApi10UpdateTriggers(d *Daemon, changed map[string]string, config *cluster
 				return err
 			}
 		case "images.auto_update_interval":
-			d.taskAutoUpdate.Reset()
+			if !d.os.MockMode {
+				d.taskAutoUpdate.Reset()
+			}
 		case "images.remote_cache_expiry":
-			d.taskPruneImages.Reset()
+			if !d.os.MockMode {
+				d.taskPruneImages.Reset()
+			}
 		}
 	}
 	if maasControllerChanged {
diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index e923f0e05..d635ad9c0 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -61,7 +61,7 @@ func Bootstrap(state *state.State, gateway *Gateway, name string) error {
 		return err
 	}
 
-	// Insert ourselves into the nodes table.
+	// Update our own entry in the nodes table.
 	err = state.Cluster.Transaction(func(tx *db.ClusterTx) error {
 		// Make sure cluster database state is in order.
 		err := membershipCheckClusterStateForBootstrapOrJoin(tx)
@@ -70,9 +70,9 @@ func Bootstrap(state *state.State, gateway *Gateway, name string) error {
 		}
 
 		// Add ourselves to the nodes table.
-		_, err = tx.NodeAdd(name, address)
+		err = tx.NodeUpdate(1, name, address)
 		if err != nil {
-			return errors.Wrap(err, "failed to insert cluster node")
+			return errors.Wrap(err, "failed to update cluster node")
 		}
 
 		return nil
@@ -320,7 +320,7 @@ func membershipCheckClusterStateForBootstrapOrJoin(tx *db.ClusterTx) error {
 	if err != nil {
 		return errors.Wrap(err, "failed to fetch current cluster nodes")
 	}
-	if len(nodes) > 0 {
+	if len(nodes) != 1 {
 		return fmt.Errorf("inconsistent state: found leftover entries in nodes")
 	}
 	return nil
@@ -332,7 +332,7 @@ func membershipCheckClusterStateForAccept(tx *db.ClusterTx, name string, address
 	if err != nil {
 		return errors.Wrap(err, "failed to fetch current cluster nodes")
 	}
-	if len(nodes) == 0 {
+	if len(nodes) == 1 && nodes[0].Address == "0.0.0.0" {
 		return fmt.Errorf("clustering not enabled")
 	}
 
diff --git a/lxd/cluster/notify.go b/lxd/cluster/notify.go
index 860692111..cb5a69a76 100644
--- a/lxd/cluster/notify.go
+++ b/lxd/cluster/notify.go
@@ -49,7 +49,7 @@ func NewNotifier(state *state.State, cert *shared.CertInfo, policy NotifierPolic
 			return err
 		}
 		for _, node := range nodes {
-			if node.Address == address {
+			if node.Address == address || node.Address == "0.0.0.0" {
 				continue // Exclude ourselves
 			}
 			if node.IsDown() {
diff --git a/lxd/cluster/notify_test.go b/lxd/cluster/notify_test.go
index 409d04d8b..1dd2fea9a 100644
--- a/lxd/cluster/notify_test.go
+++ b/lxd/cluster/notify_test.go
@@ -32,16 +32,26 @@ func TestNewNotifier(t *testing.T) {
 	notifier, err := cluster.NewNotifier(state, cert, cluster.NotifyAll)
 	require.NoError(t, err)
 
-	i := 0
+	peers := make(chan string, 2)
 	hook := func(client lxd.ContainerServer) error {
 		server, _, err := client.GetServer()
 		require.NoError(t, err)
-		assert.Equal(t, f.Address(i+1), server.Config["core.https_address"])
-		i++
+		peers <- server.Config["core.https_address"].(string)
 		return nil
 	}
 	assert.NoError(t, notifier(hook))
-	assert.Equal(t, 2, i)
+
+	addresses := make([]string, 2)
+	for i := range addresses {
+		select {
+		case addresses[i] = <-peers:
+		default:
+		}
+	}
+	require.NoError(t, err)
+	for i := range addresses {
+		assert.True(t, shared.StringInSlice(f.Address(i+1), addresses))
+	}
 }
 
 // Creating a new notifier fails if the policy is set to NotifyAll and one of
@@ -108,7 +118,12 @@ func (h *notifyFixtures) Nodes(cert *shared.CertInfo, n int) func() {
 		for i := 0; i < n; i++ {
 			name := strconv.Itoa(i)
 			address := servers[i].Listener.Addr().String()
-			_, err := tx.NodeAdd(name, address)
+			var err error
+			if i == 0 {
+				err = tx.NodeUpdate(int64(1), name, address)
+			} else {
+				_, err = tx.NodeAdd(name, address)
+			}
 			require.NoError(h.t, err)
 		}
 		return nil
diff --git a/lxd/db/cluster/open.go b/lxd/db/cluster/open.go
index 03fe5ece9..bcbb3a727 100644
--- a/lxd/db/cluster/open.go
+++ b/lxd/db/cluster/open.go
@@ -57,12 +57,16 @@ func EnsureSchema(db *sql.DB, address string) (bool, error) {
 		}
 
 		// Check if we're clustered
-		n, err := selectNodesCount(tx)
+		n, err := selectUnclusteredNodesCount(tx)
 		if err != nil {
-			return errors.Wrap(err, "failed to fetch current nodes count")
+			return errors.Wrap(err, "failed to fetch unclustered nodes count")
 		}
-		if n == 0 {
-			return nil // Nothing to do.
+		if n > 1 {
+			// This should never happen, since we only add nodes
+			// with valid addresses, but check it for sanity.
+			return fmt.Errorf("found more than one unclustered nodes")
+		} else if n == 1 {
+			address = "0.0.0.0" // We're not clustered
 		}
 
 		// Update the schema and api_extension columns of ourselves.
@@ -83,13 +87,27 @@ func EnsureSchema(db *sql.DB, address string) (bool, error) {
 	schema := Schema()
 	schema.Check(check)
 
-	_, err := schema.Ensure(db)
+	initial, err := schema.Ensure(db)
 	if someNodesAreBehind {
 		return false, nil
 	}
 	if err != nil {
 		return false, err
 	}
+
+	// When creating a database from scratch, insert an entry for node
+	// 1. This is needed for referential integrity with other tables.
+	if initial == 0 {
+		stmt := `
+INSERT INTO nodes(id, name, address, schema, api_extensions) VALUES(1, 'none', '0.0.0.0', ?, ?)
+`
+		_, err := db.Exec(stmt, SchemaVersion, apiExtensions)
+		if err != nil {
+			return false, err
+		}
+
+	}
+
 	return true, err
 }
 
diff --git a/lxd/db/cluster/open_test.go b/lxd/db/cluster/open_test.go
index f858d7b35..5a83789ca 100644
--- a/lxd/db/cluster/open_test.go
+++ b/lxd/db/cluster/open_test.go
@@ -16,6 +16,7 @@ import (
 // If the node is not clustered, the schema updates works normally.
 func TestEnsureSchema_NoClustered(t *testing.T) {
 	db := newDB(t)
+	addNode(t, db, "0.0.0.0", 1, 1)
 	ready, err := cluster.EnsureSchema(db, "1.2.3.4:666")
 	assert.True(t, ready)
 	assert.NoError(t, err)
diff --git a/lxd/db/cluster/query.go b/lxd/db/cluster/query.go
index 286ffe2db..dda6b63c5 100644
--- a/lxd/db/cluster/query.go
+++ b/lxd/db/cluster/query.go
@@ -27,9 +27,10 @@ func updateNodeVersion(tx *sql.Tx, address string, apiExtensions int) error {
 	return nil
 }
 
-// Return the number of rows in the nodes table.
-func selectNodesCount(tx *sql.Tx) (int, error) {
-	return query.Count(tx, "nodes", "")
+// Return the number of rows in the nodes table that have their address column
+// set to '0.0.0.0'.
+func selectUnclusteredNodesCount(tx *sql.Tx) (int, error) {
+	return query.Count(tx, "nodes", "address='0.0.0.0'")
 }
 
 // Return a slice of binary integer tuples. Each tuple contains the schema
diff --git a/lxd/db/cluster/schema.go b/lxd/db/cluster/schema.go
index 3354e36b2..424e078d0 100644
--- a/lxd/db/cluster/schema.go
+++ b/lxd/db/cluster/schema.go
@@ -16,15 +16,26 @@ CREATE TABLE networks (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
     name VARCHAR(255) NOT NULL,
     description TEXT,
+    state INTEGER NOT NULL DEFAULT 0,
     UNIQUE (name)
 );
 CREATE TABLE networks_config (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
     network_id INTEGER NOT NULL,
+    node_id INTEGER,
     key VARCHAR(255) NOT NULL,
     value TEXT,
-    UNIQUE (network_id, key),
-    FOREIGN KEY (network_id) REFERENCES networks (id) ON DELETE CASCADE
+    UNIQUE (network_id, node_id, key),
+    FOREIGN KEY (network_id) REFERENCES networks (id) ON DELETE CASCADE,
+    FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
+);
+CREATE TABLE networks_nodes (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    network_id INTEGER NOT NULL,
+    node_id INTEGER NOT NULL,
+    UNIQUE (network_id, node_id),
+    FOREIGN KEY (network_id) REFERENCES networks (id) ON DELETE CASCADE,
+    FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
 );
 CREATE TABLE nodes (
     id INTEGER PRIMARY KEY,
diff --git a/lxd/db/cluster/update.go b/lxd/db/cluster/update.go
index 967ac40f4..c6b38d60b 100644
--- a/lxd/db/cluster/update.go
+++ b/lxd/db/cluster/update.go
@@ -40,15 +40,26 @@ CREATE TABLE networks (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
     name VARCHAR(255) NOT NULL,
     description TEXT,
+    state INTEGER NOT NULL DEFAULT 0,
     UNIQUE (name)
 );
+CREATE TABLE networks_nodes (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    network_id INTEGER NOT NULL,
+    node_id INTEGER NOT NULL,
+    UNIQUE (network_id, node_id),
+    FOREIGN KEY (network_id) REFERENCES networks (id) ON DELETE CASCADE,
+    FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
+);
 CREATE TABLE networks_config (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
     network_id INTEGER NOT NULL,
+    node_id INTEGER,
     key VARCHAR(255) NOT NULL,
     value TEXT,
-    UNIQUE (network_id, key),
-    FOREIGN KEY (network_id) REFERENCES networks (id) ON DELETE CASCADE
+    UNIQUE (network_id, node_id, key),
+    FOREIGN KEY (network_id) REFERENCES networks (id) ON DELETE CASCADE,
+    FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
 );
 `
 	_, err := tx.Exec(stmt)
diff --git a/lxd/db/cluster/update_test.go b/lxd/db/cluster/update_test.go
index f56f63e11..97d94898a 100644
--- a/lxd/db/cluster/update_test.go
+++ b/lxd/db/cluster/update_test.go
@@ -47,11 +47,11 @@ func TestUpdateFromV1_Network(t *testing.T) {
 	db, err := schema.ExerciseUpdate(2, nil)
 	require.NoError(t, err)
 
-	_, err = db.Exec("INSERT INTO networks VALUES (1, 'foo', 'blah')")
+	_, err = db.Exec("INSERT INTO networks VALUES (1, 'foo', 'blah', 1)")
 	require.NoError(t, err)
 
 	// Unique constraint on name.
-	_, err = db.Exec("INSERT INTO networks VALUES (2, 'foo', 'gosh')")
+	_, err = db.Exec("INSERT INTO networks VALUES (2, 'foo', 'gosh', 1)")
 	require.Error(t, err)
 }
 
@@ -60,26 +60,50 @@ func TestUpdateFromV1_NetworkConfig(t *testing.T) {
 	db, err := schema.ExerciseUpdate(2, nil)
 	require.NoError(t, err)
 
-	_, err = db.Exec("INSERT INTO networks VALUES (1, 'foo', 'blah')")
+	_, err = db.Exec("INSERT INTO nodes VALUES (1, 'one', '', '1.1.1.1', 666, 999, ?)", time.Now())
 	require.NoError(t, err)
 
-	_, err = db.Exec("INSERT INTO networks_config VALUES (1, 1, 'bar', 'baz')")
+	_, err = db.Exec("INSERT INTO nodes VALUES (2, 'two', '', '2.2.2.2', 666, 999, ?)", time.Now())
 	require.NoError(t, err)
 
-	// Unique constraint on network_id/key.
-	_, err = db.Exec("INSERT INTO networks_config VALUES (2, 1, 'bar', 'egg')")
+	_, err = db.Exec("INSERT INTO networks VALUES (1, 'foo', 'blah', 1)")
+	require.NoError(t, err)
+
+	_, err = db.Exec("INSERT INTO networks_config VALUES (1, 1, 1, 'bar', 'baz')")
+	require.NoError(t, err)
+
+	// Unique constraint on network_id/node_id/key.
+	_, err = db.Exec("INSERT INTO networks_config VALUES (2, 1, 1, 'bar', 'egg')")
 	require.Error(t, err)
+	_, err = db.Exec("INSERT INTO networks_config VALUES (3, 1, 2, 'bar', 'egg')")
+	require.NoError(t, err)
 
 	// Reference constraint on network_id.
-	_, err = db.Exec("INSERT INTO networks_config VALUES (3, , 'fuz', 'buz')")
+	_, err = db.Exec("INSERT INTO networks_config VALUES (4, 2, 1, 'fuz', 'buz')")
+	require.Error(t, err)
+
+	// Reference constraint on node_id.
+	_, err = db.Exec("INSERT INTO networks_config VALUES (5, 1, 3, 'fuz', 'buz')")
 	require.Error(t, err)
 
-	// Cascade deletes
-	result, err := db.Exec("DELETE FROM networks")
+	// Cascade deletes on node_id
+	result, err := db.Exec("DELETE FROM nodes WHERE id=2")
 	require.NoError(t, err)
 	n, err := result.RowsAffected()
 	require.NoError(t, err)
 	assert.Equal(t, int64(1), n)
+	result, err = db.Exec("UPDATE networks_config SET value='yuk'")
+	require.NoError(t, err)
+	n, err = result.RowsAffected()
+	require.NoError(t, err)
+	assert.Equal(t, int64(1), n) // Only one row was affected, since the other got deleted
+
+	// Cascade deletes on network_id
+	result, err = db.Exec("DELETE FROM networks")
+	require.NoError(t, err)
+	n, err = result.RowsAffected()
+	require.NoError(t, err)
+	assert.Equal(t, int64(1), n)
 	result, err = db.Exec("DELETE FROM networks_config")
 	require.NoError(t, err)
 	n, err = result.RowsAffected()
diff --git a/lxd/db/db.go b/lxd/db/db.go
index e21c429aa..985e7b2f2 100644
--- a/lxd/db/db.go
+++ b/lxd/db/db.go
@@ -145,6 +145,7 @@ func (n *Node) Begin() (*sql.Tx, error) {
 // Cluster mediates access to LXD's data stored in the cluster dqlite database.
 type Cluster struct {
 	db *sql.DB // Handle to the cluster dqlite database, gated behind gRPC SQL.
+	id int64   // Node ID of this LXD instance.
 }
 
 // OpenCluster creates a new Cluster object for interacting with the dqlite
@@ -174,6 +175,29 @@ func OpenCluster(name string, dialer grpcsql.Dialer, address string) (*Cluster,
 		db: db,
 	}
 
+	// Figure out the ID of this node.
+	err = cluster.Transaction(func(tx *ClusterTx) error {
+		nodes, err := tx.Nodes()
+		if err != nil {
+			return errors.Wrap(err, "failed to fetch nodes")
+		}
+		if len(nodes) == 1 && nodes[0].Address == "0.0.0.0" {
+			// We're not clustered
+			cluster.id = 1
+			return nil
+		}
+		for _, node := range nodes {
+			if node.Address == address {
+				cluster.id = node.ID
+				return nil
+			}
+		}
+		return fmt.Errorf("no node registered with address %s", address)
+	})
+	if err != nil {
+		return nil, err
+	}
+
 	return cluster, nil
 }
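
The ID-resolution rule in OpenCluster() is worth spelling out on its own. A
minimal sketch of the same logic as a standalone helper (resolveNodeID is a
hypothetical name, not part of this patch):

    func resolveNodeID(nodes []db.NodeInfo, address string) (int64, error) {
        // A single node listening on 0.0.0.0 means we're not clustered.
        if len(nodes) == 1 && nodes[0].Address == "0.0.0.0" {
            return 1, nil
        }
        for _, node := range nodes {
            if node.Address == address {
                return node.ID, nil
            }
        }
        return -1, fmt.Errorf("no node registered with address %s", address)
    }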
 
diff --git a/lxd/db/migration.go b/lxd/db/migration.go
index 737d3b11b..084c14b85 100644
--- a/lxd/db/migration.go
+++ b/lxd/db/migration.go
@@ -66,9 +66,6 @@ func (c *Cluster) ImportPreClusteringData(dump *Dump) error {
 	}
 
 	for _, table := range preClusteringTables {
-		columns := dump.Schema[table]
-		stmt := fmt.Sprintf("INSERT INTO %s(%s)", table, strings.Join(columns, ", "))
-		stmt += fmt.Sprintf(" VALUES %s", query.Params(len(columns)))
 		for i, row := range dump.Data[table] {
 			for i, element := range row {
 				// Convert []byte columns to string. This is safe to do since
@@ -78,6 +75,14 @@ func (c *Cluster) ImportPreClusteringData(dump *Dump) error {
 					row[i] = string(bytes)
 				}
 			}
+			columns := dump.Schema[table]
+			switch table {
+			case "networks_config":
+				columns = append(columns, "node_id")
+				row = append(row, int64(1))
+			}
+			stmt := fmt.Sprintf("INSERT INTO %s(%s)", table, strings.Join(columns, ", "))
+			stmt += fmt.Sprintf(" VALUES %s", query.Params(len(columns)))
 			result, err := tx.Exec(stmt, row...)
 			if err != nil {
 				return errors.Wrapf(err, "failed to insert row %d into %s", i, table)
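
To make the special-casing concrete: for a networks_config row coming from a
pre-clustering dump, the statement built above ends up as follows (a sketch;
it assumes query.Params(n) renders an n-place "(?, ?, ...)" list, as used
elsewhere in this series):

    columns := []string{"id", "network_id", "key", "value", "node_id"}
    row := []interface{}{int64(1), int64(1), "ipv4.nat", "true", int64(1)}
    stmt := fmt.Sprintf("INSERT INTO networks_config(%s)", strings.Join(columns, ", "))
    stmt += fmt.Sprintf(" VALUES %s", query.Params(len(columns)))
    // INSERT INTO networks_config(id, network_id, key, value, node_id)
    //     VALUES (?, ?, ?, ?, ?)
    _, err := tx.Exec(stmt, row...)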
diff --git a/lxd/db/networks.go b/lxd/db/networks.go
index 16406c2f9..f56e9ea60 100644
--- a/lxd/db/networks.go
+++ b/lxd/db/networks.go
@@ -7,6 +7,7 @@ import (
 
 	_ "github.com/mattn/go-sqlite3"
 
+	"github.com/lxc/lxd/lxd/db/query"
 	"github.com/lxc/lxd/shared/api"
 )
 
@@ -61,8 +62,8 @@ func (c *Cluster) NetworkGetInterface(devName string) (int64, *api.Network, erro
 	name := ""
 	value := ""
 
-	q := "SELECT networks.id, networks.name, networks_config.value FROM networks LEFT JOIN networks_config ON networks.id=networks_config.network_id WHERE networks_config.key=\"bridge.external_interfaces\""
-	arg1 := []interface{}{}
+	q := "SELECT networks.id, networks.name, networks_config.value FROM networks LEFT JOIN networks_config ON networks.id=networks_config.network_id WHERE networks_config.key=\"bridge.external_interfaces\" AND networks_config.node_id=?"
+	arg1 := []interface{}{c.id}
 	arg2 := []interface{}{id, name, value}
 	result, err := queryScan(c.db, q, arg1, arg2)
 	if err != nil {
@@ -105,8 +106,9 @@ func (c *Cluster) NetworkConfigGet(id int64) (map[string]string, error) {
         SELECT
             key, value
         FROM networks_config
-		WHERE network_id=?`
-	inargs := []interface{}{id}
+		WHERE network_id=?
+                AND node_id=?`
+	inargs := []interface{}{id, c.id}
 	outfmt := []interface{}{key, value}
 	results, err := queryScan(c.db, query, inargs, outfmt)
 	if err != nil {
@@ -160,7 +162,16 @@ func (c *Cluster) NetworkCreate(name, description string, config map[string]stri
 		return -1, err
 	}
 
-	err = NetworkConfigAdd(tx, id, config)
+	// Insert a node-specific entry pointing to ourselves.
+	columns := []string{"network_id", "node_id"}
+	values := []interface{}{id, c.id}
+	_, err = query.UpsertObject(tx, "networks_nodes", columns, values)
+	if err != nil {
+		tx.Rollback()
+		return -1, err
+	}
+
+	err = NetworkConfigAdd(tx, id, c.id, config)
 	if err != nil {
 		tx.Rollback()
 		return -1, err
@@ -191,13 +202,13 @@ func (c *Cluster) NetworkUpdate(name, description string, config map[string]stri
 		return err
 	}
 
-	err = NetworkConfigClear(tx, id)
+	err = NetworkConfigClear(tx, id, c.id)
 	if err != nil {
 		tx.Rollback()
 		return err
 	}
 
-	err = NetworkConfigAdd(tx, id, config)
+	err = NetworkConfigAdd(tx, id, c.id, config)
 	if err != nil {
 		tx.Rollback()
 		return err
@@ -211,8 +222,8 @@ func NetworkUpdateDescription(tx *sql.Tx, id int64, description string) error {
 	return err
 }
 
-func NetworkConfigAdd(tx *sql.Tx, id int64, config map[string]string) error {
-	str := fmt.Sprintf("INSERT INTO networks_config (network_id, key, value) VALUES(?, ?, ?)")
+func NetworkConfigAdd(tx *sql.Tx, networkID, nodeID int64, config map[string]string) error {
+	str := fmt.Sprintf("INSERT INTO networks_config (network_id, node_id, key, value) VALUES(?, ?, ?, ?)")
 	stmt, err := tx.Prepare(str)
 	defer stmt.Close()
 	if err != nil {
@@ -224,7 +235,7 @@ func NetworkConfigAdd(tx *sql.Tx, id int64, config map[string]string) error {
 			continue
 		}
 
-		_, err = stmt.Exec(id, k, v)
+		_, err = stmt.Exec(networkID, nodeID, k, v)
 		if err != nil {
 			return err
 		}
@@ -233,8 +244,10 @@ func NetworkConfigAdd(tx *sql.Tx, id int64, config map[string]string) error {
 	return nil
 }
 
-func NetworkConfigClear(tx *sql.Tx, id int64) error {
-	_, err := tx.Exec("DELETE FROM networks_config WHERE network_id=?", id)
+func NetworkConfigClear(tx *sql.Tx, networkID, nodeID int64) error {
+	_, err := tx.Exec(
+		"DELETE FROM networks_config WHERE network_id=? AND node_id=?",
+		networkID, nodeID)
 	if err != nil {
 		return err
 	}
diff --git a/lxd/db/node.go b/lxd/db/node.go
index 96fd70bf7..506eb44a6 100644
--- a/lxd/db/node.go
+++ b/lxd/db/node.go
@@ -64,6 +64,22 @@ func (c *ClusterTx) NodeAdd(name string, address string) (int64, error) {
 	return query.UpsertObject(c.tx, "nodes", columns, values)
 }
 
+// NodeUpdate updates the name and address of a node.
+func (c *ClusterTx) NodeUpdate(id int64, name string, address string) error {
+	result, err := c.tx.Exec("UPDATE nodes SET name=?, address=? WHERE id=?", name, address, id)
+	if err != nil {
+		return err
+	}
+	n, err := result.RowsAffected()
+	if err != nil {
+		return err
+	}
+	if n != 1 {
+		return fmt.Errorf("query updated %d rows instead of 1", n)
+	}
+	return nil
+}
+
 // NodeHeartbeat updates the heartbeat column of the node with the given address.
 func (c *ClusterTx) NodeHeartbeat(address string, heartbeat time.Time) error {
 	stmt := "UPDATE nodes SET heartbeat=? WHERE address=?"
diff --git a/lxd/db/node_test.go b/lxd/db/node_test.go
index 2dbdf0efc..c213580ae 100644
--- a/lxd/db/node_test.go
+++ b/lxd/db/node_test.go
@@ -18,13 +18,13 @@ func TestNodeAdd(t *testing.T) {
 
 	id, err := tx.NodeAdd("buzz", "1.2.3.4:666")
 	require.NoError(t, err)
-	assert.Equal(t, int64(1), id)
+	assert.Equal(t, int64(2), id)
 
 	nodes, err := tx.Nodes()
 	require.NoError(t, err)
-	require.Len(t, nodes, 1)
+	require.Len(t, nodes, 2)
 
-	node := nodes[0]
+	node := nodes[1]
 	assert.Equal(t, "buzz", node.Name)
 	assert.Equal(t, "1.2.3.4:666", node.Address)
 	assert.Equal(t, cluster.SchemaVersion, node.Schema)
@@ -45,8 +45,8 @@ func TestNodeHeartbeat(t *testing.T) {
 
 	nodes, err := tx.Nodes()
 	require.NoError(t, err)
-	require.Len(t, nodes, 1)
+	require.Len(t, nodes, 2)
 
-	node := nodes[0]
+	node := nodes[1]
 	assert.True(t, node.IsDown())
 }
diff --git a/lxd/main_init_test.go b/lxd/main_init_test.go
index 6eef12c19..952af995b 100644
--- a/lxd/main_init_test.go
+++ b/lxd/main_init_test.go
@@ -170,7 +170,7 @@ func (suite *cmdInitTestSuite) TestCmdInit_InteractiveClusteringJoin() {
 		"ipv4.nat": "true",
 	}
 	client := f.ClientUnix(leader)
-	client.CreateNetwork(network)
+	suite.Req.Nil(client.CreateNetwork(network))
 
 	suite.command.PasswordReader = func(int) ([]byte, error) {
 		return []byte("sekret"), nil

From 287b36982c8d58a0082f270caabe2d8b01443a24 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 24 Oct 2017 19:32:49 +0000
Subject: [PATCH 054/227] Update networks_config table with joining node's data

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/heartbeat_test.go  |  9 +++++-
 lxd/cluster/membership.go      | 41 ++++++++++++++++++++++++--
 lxd/cluster/membership_test.go | 29 +++++++++++-------
 lxd/db/cluster/schema.go       |  6 ++--
 lxd/db/cluster/update.go       |  6 ++--
 lxd/db/db.go                   | 14 +++++++--
 lxd/db/networks.go             | 67 ++++++++++++++++++++++++++++++++++++++++--
 lxd/db/node.go                 | 36 +++++++++++++++++++----
 lxd/db/node_test.go            |  2 +-
 9 files changed, 178 insertions(+), 32 deletions(-)

diff --git a/lxd/cluster/heartbeat_test.go b/lxd/cluster/heartbeat_test.go
index d129264d5..1e78496f3 100644
--- a/lxd/cluster/heartbeat_test.go
+++ b/lxd/cluster/heartbeat_test.go
@@ -172,7 +172,6 @@ func (f *heartbeatFixture) node() (*state.State, *cluster.Gateway, string) {
 
 	mux := http.NewServeMux()
 	server := newServer(cert, mux)
-	f.cleanups = append(f.cleanups, server.Close)
 
 	for path, handler := range gateway.HandlerFuncs() {
 		mux.HandleFunc(path, handler)
@@ -182,6 +181,11 @@ func (f *heartbeatFixture) node() (*state.State, *cluster.Gateway, string) {
 	mf := &membershipFixtures{t: f.t, state: state}
 	mf.NetworkAddress(address)
 
+	var err error
+	require.NoError(f.t, state.Cluster.Close())
+	state.Cluster, err = db.OpenCluster("db.bin", gateway.Dialer(), address)
+	require.NoError(f.t, err)
+
 	f.gateways[len(f.gateways)] = gateway
 	f.states[gateway] = state
 	f.servers[gateway] = server
@@ -194,4 +198,7 @@ func (f *heartbeatFixture) Cleanup() {
 	for i := len(f.cleanups) - 1; i >= 0; i-- {
 		f.cleanups[i]()
 	}
+	for _, server := range f.servers {
+		server.Close()
+	}
 }
diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index d635ad9c0..50c1de33a 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -228,6 +228,18 @@ func Join(state *state.State, gateway *Gateway, cert *shared.CertInfo, name stri
 		return err
 	}
 
+	// Get the local config keys for the cluster networks. This assumes
+	// that the local networks match the cluster networks; if they don't,
+	// an error is returned.
+	var networks map[string]map[string]string
+	err = state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		networks, err = tx.NetworkConfigs()
+		return err
+	})
+	if err != nil {
+		return err
+	}
+
 	// Shutdown the gateway and wipe any raft data. This will trash any
 	// gRPC SQL connection against our in-memory dqlite driver and shutdown
 	// the associated raft instance.
@@ -273,10 +285,33 @@ func Join(state *state.State, gateway *Gateway, cert *shared.CertInfo, name stri
 	// Make sure we can actually connect to the cluster database through
 	// the network endpoint. This also makes the Go SQL pooling system
 	// invalidate the old connection, so new queries will be executed over
-	// the new gRPC network connection.
+	// the new gRPC network connection. Also, update the networks_nodes and
+	// networks_config tables with our local configuration.
 	err = state.Cluster.Transaction(func(tx *db.ClusterTx) error {
-		_, err := tx.Nodes()
-		return err
+		node, err := tx.Node(address)
+		if err != nil {
+			return errors.Wrap(err, "failed to get ID of joining node")
+		}
+		state.Cluster.ID(node.ID)
+		ids, err := tx.NetworkIDs()
+		if err != nil {
+			return errors.Wrap(err, "failed to get cluster network IDs")
+		}
+		for name, id := range ids {
+			config, ok := networks[name]
+			if !ok {
+				return fmt.Errorf("joining node has no config for network %s", name)
+			}
+			err := tx.NetworkNodeJoin(id, node.ID)
+			if err != nil {
+				return errors.Wrap(err, "failed to add joining node's to the network")
+			}
+			err = tx.NetworkConfigAdd(id, node.ID, config)
+			if err != nil {
+				return errors.Wrap(err, "failed to add joining node's network config")
+			}
+		}
+		return nil
 	})
 	if err != nil {
 		return errors.Wrap(err, "cluster database initialization failed")
diff --git a/lxd/cluster/membership_test.go b/lxd/cluster/membership_test.go
index 83b8a5576..fd4489ac0 100644
--- a/lxd/cluster/membership_test.go
+++ b/lxd/cluster/membership_test.go
@@ -242,45 +242,54 @@ func TestAccept_MaxRaftNodes(t *testing.T) {
 
 func TestJoin(t *testing.T) {
 	// Setup a target node running as leader of a cluster.
+	targetCert := shared.TestingKeyPair()
+	targetMux := http.NewServeMux()
+	targetServer := newServer(targetCert, targetMux)
+	defer targetServer.Close()
+
 	targetState, cleanup := state.NewTestState(t)
 	defer cleanup()
 
-	targetCert := shared.TestingKeyPair()
 	targetGateway := newGateway(t, targetState.Node, targetCert)
 	defer targetGateway.Shutdown()
 
-	targetMux := http.NewServeMux()
-	targetServer := newServer(targetCert, targetMux)
-	defer targetServer.Close()
-
 	for path, handler := range targetGateway.HandlerFuncs() {
 		targetMux.HandleFunc(path, handler)
 	}
 
 	targetAddress := targetServer.Listener.Addr().String()
+	var err error
+	require.NoError(t, targetState.Cluster.Close())
+	targetState.Cluster, err = db.OpenCluster("db.bin", targetGateway.Dialer(), targetAddress)
+	require.NoError(t, err)
 	targetF := &membershipFixtures{t: t, state: targetState}
 	targetF.NetworkAddress(targetAddress)
 
-	err := cluster.Bootstrap(targetState, targetGateway, "buzz")
+	err = cluster.Bootstrap(targetState, targetGateway, "buzz")
 	require.NoError(t, err)
 
 	// Setup a joining node
+	mux := http.NewServeMux()
+	server := newServer(targetCert, mux)
+	defer server.Close()
+
 	state, cleanup := state.NewTestState(t)
 	defer cleanup()
 
 	cert := shared.TestingAltKeyPair()
 	gateway := newGateway(t, state.Node, cert)
-	defer gateway.Shutdown()
 
-	mux := http.NewServeMux()
-	server := newServer(cert, mux)
-	defer server.Close()
+	defer gateway.Shutdown()
 
 	for path, handler := range gateway.HandlerFuncs() {
 		mux.HandleFunc(path, handler)
 	}
 
 	address := server.Listener.Addr().String()
+	require.NoError(t, state.Cluster.Close())
+	state.Cluster, err = db.OpenCluster("db.bin", gateway.Dialer(), address)
+	require.NoError(t, err)
+
 	f := &membershipFixtures{t: t, state: state}
 	f.NetworkAddress(address)
 
diff --git a/lxd/db/cluster/schema.go b/lxd/db/cluster/schema.go
index 424e078d0..f003934b9 100644
--- a/lxd/db/cluster/schema.go
+++ b/lxd/db/cluster/schema.go
@@ -8,13 +8,13 @@ package cluster
 const freshSchema = `
 CREATE TABLE config (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    key VARCHAR(255) NOT NULL,
+    key TEXT NOT NULL,
     value TEXT,
     UNIQUE (key)
 );
 CREATE TABLE networks (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    name VARCHAR(255) NOT NULL,
+    name TEXT NOT NULL,
     description TEXT,
     state INTEGER NOT NULL DEFAULT 0,
     UNIQUE (name)
@@ -23,7 +23,7 @@ CREATE TABLE networks_config (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
     network_id INTEGER NOT NULL,
     node_id INTEGER,
-    key VARCHAR(255) NOT NULL,
+    key TEXT NOT NULL,
     value TEXT,
     UNIQUE (network_id, node_id, key),
     FOREIGN KEY (network_id) REFERENCES networks (id) ON DELETE CASCADE,
diff --git a/lxd/db/cluster/update.go b/lxd/db/cluster/update.go
index c6b38d60b..a1e5c116d 100644
--- a/lxd/db/cluster/update.go
+++ b/lxd/db/cluster/update.go
@@ -32,13 +32,13 @@ func updateFromV1(tx *sql.Tx) error {
 	stmt := `
 CREATE TABLE config (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    key VARCHAR(255) NOT NULL,
+    key TEXT NOT NULL,
     value TEXT,
     UNIQUE (key)
 );
 CREATE TABLE networks (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    name VARCHAR(255) NOT NULL,
+    name TEXT NOT NULL,
     description TEXT,
     state INTEGER NOT NULL DEFAULT 0,
     UNIQUE (name)
@@ -55,7 +55,7 @@ CREATE TABLE networks_config (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
     network_id INTEGER NOT NULL,
     node_id INTEGER,
-    key VARCHAR(255) NOT NULL,
+    key TEXT NOT NULL,
     value TEXT,
     UNIQUE (network_id, node_id, key),
     FOREIGN KEY (network_id) REFERENCES networks (id) ON DELETE CASCADE,
diff --git a/lxd/db/db.go b/lxd/db/db.go
index 985e7b2f2..4f3a95a34 100644
--- a/lxd/db/db.go
+++ b/lxd/db/db.go
@@ -183,7 +183,7 @@ func OpenCluster(name string, dialer grpcsql.Dialer, address string) (*Cluster,
 		}
 		if len(nodes) == 1 && nodes[0].Address == "0.0.0.0" {
 			// We're not clustered
-			cluster.id = 1
+			cluster.ID(1)
 			return nil
 		}
 		for _, node := range nodes {
@@ -218,7 +218,9 @@ func (c *Cluster) Transaction(f func(*ClusterTx) error) error {
 		if err != nil {
 			// FIXME: we should bubble errors using errors.Wrap()
 			// instead, and check for sql.ErrBadConnection.
-			if strings.Contains(err.Error(), "bad connection") {
+			badConnection := strings.Contains(err.Error(), "bad connection")
+			leadershipLost := strings.Contains(err.Error(), "leadership lost")
+			if badConnection || leadershipLost {
 				logger.Debugf("Retry failed transaction")
 				time.Sleep(time.Second)
 				continue
@@ -229,6 +231,14 @@ func (c *Cluster) Transaction(f func(*ClusterTx) error) error {
 	return err
 }
 
+// ID sets the node ID associated with this cluster instance. It's used for
+// backward compatibility with all db-related APIs that were written before
+// clustering and don't accept a node ID; in those cases we automatically
+// use this value as the implicit node ID.
+func (c *Cluster) ID(id int64) {
+	c.id = id
+}
+
 // Close the database facade.
 func (c *Cluster) Close() error {
 	return c.db.Close()
diff --git a/lxd/db/networks.go b/lxd/db/networks.go
index f56e9ea60..ff98871cb 100644
--- a/lxd/db/networks.go
+++ b/lxd/db/networks.go
@@ -11,6 +11,67 @@ import (
 	"github.com/lxc/lxd/shared/api"
 )
 
+// NetworkConfigs returns a map associating each network name to its config
+// values.
+func (c *ClusterTx) NetworkConfigs() (map[string]map[string]string, error) {
+	names, err := query.SelectStrings(c.tx, "SELECT name FROM networks")
+	if err != nil {
+		return nil, err
+	}
+	networks := make(map[string]map[string]string, len(names))
+	for _, name := range names {
+		table := "networks_config JOIN networks ON networks.id=networks_config.network_id"
+		config, err := query.SelectConfig(c.tx, table, fmt.Sprintf("networks.name='%s'", name))
+		if err != nil {
+			return nil, err
+		}
+		networks[name] = config
+	}
+	return networks, nil
+}
+
+// NetworkIDs returns a map associating each network name to its ID.
+func (c *ClusterTx) NetworkIDs() (map[string]int64, error) {
+	networks := []struct {
+		id   int64
+		name string
+	}{}
+	dest := func(i int) []interface{} {
+		networks = append(networks, struct {
+			id   int64
+			name string
+		}{})
+		return []interface{}{&networks[i].id, &networks[i].name}
+
+	}
+	err := query.SelectObjects(c.tx, dest, "SELECT id, name FROM networks")
+	if err != nil {
+		return nil, err
+	}
+	ids := map[string]int64{}
+	for _, network := range networks {
+		ids[network.name] = network.id
+	}
+	return ids, nil
+}
+
+// NetworkConfigAdd adds a new entry in the networks_config table
+func (c *ClusterTx) NetworkConfigAdd(networkID, nodeID int64, config map[string]string) error {
+	return networkConfigAdd(c.tx, networkID, nodeID, config)
+}
+
+// NetworkNodeJoin adds a new entry in the networks_nodes table.
+//
+// It should only be used when a new node joins the cluster, at which point
+// it's safe to assume that the relevant network has already been created on
+// the joining node and we just need to track it.
+func (c *ClusterTx) NetworkNodeJoin(networkID, nodeID int64) error {
+	columns := []string{"network_id", "node_id"}
+	values := []interface{}{networkID, nodeID}
+	_, err := query.UpsertObject(c.tx, "networks_nodes", columns, values)
+	return err
+}
+
 func (c *Cluster) Networks() ([]string, error) {
 	q := fmt.Sprintf("SELECT name FROM networks")
 	inargs := []interface{}{}
@@ -171,7 +232,7 @@ func (c *Cluster) NetworkCreate(name, description string, config map[string]stri
 		return -1, err
 	}
 
-	err = NetworkConfigAdd(tx, id, c.id, config)
+	err = networkConfigAdd(tx, id, c.id, config)
 	if err != nil {
 		tx.Rollback()
 		return -1, err
@@ -208,7 +269,7 @@ func (c *Cluster) NetworkUpdate(name, description string, config map[string]stri
 		return err
 	}
 
-	err = NetworkConfigAdd(tx, id, c.id, config)
+	err = networkConfigAdd(tx, id, c.id, config)
 	if err != nil {
 		tx.Rollback()
 		return err
@@ -222,7 +283,7 @@ func NetworkUpdateDescription(tx *sql.Tx, id int64, description string) error {
 	return err
 }
 
-func NetworkConfigAdd(tx *sql.Tx, networkID, nodeID int64, config map[string]string) error {
+func networkConfigAdd(tx *sql.Tx, networkID, nodeID int64, config map[string]string) error {
 	str := fmt.Sprintf("INSERT INTO networks_config (network_id, node_id, key, value) VALUES(?, ?, ?, ?)")
 	stmt, err := tx.Prepare(str)
 	defer stmt.Close()
diff --git a/lxd/db/node.go b/lxd/db/node.go
index 506eb44a6..e029d1b31 100644
--- a/lxd/db/node.go
+++ b/lxd/db/node.go
@@ -27,10 +27,33 @@ func (n NodeInfo) IsDown() bool {
 	return n.Heartbeat.Before(time.Now().Add(-20 * time.Second))
 }
 
+// Node returns the node with the given network address.
+func (c *ClusterTx) Node(address string) (NodeInfo, error) {
+	null := NodeInfo{}
+	nodes, err := c.nodes("address=?", address)
+	if err != nil {
+		return null, err
+	}
+	switch len(nodes) {
+	case 0:
+		return null, NoSuchObjectError
+	case 1:
+		return nodes[0], nil
+	default:
+		return null, fmt.Errorf("more than one node matches")
+	}
+}
+
 // Nodes returns all LXD nodes part of the cluster.
 //
-// If this LXD instance is not clustered, an empty list is returned.
+// If this LXD instance is not clustered, a list with a single node whose
+// address is 0.0.0.0 is returned.
 func (c *ClusterTx) Nodes() ([]NodeInfo, error) {
+	return c.nodes("")
+}
+
+// nodes returns all LXD nodes matching the given WHERE clause, if any.
+func (c *ClusterTx) nodes(where string, args ...interface{}) ([]NodeInfo, error) {
 	nodes := []NodeInfo{}
 	dest := func(i int) []interface{} {
 		nodes = append(nodes, NodeInfo{})
@@ -45,11 +68,12 @@ func (c *ClusterTx) Nodes() ([]NodeInfo, error) {
 		}
 	}
 	stmt := `
-SELECT id, name, address, description, schema, api_extensions, heartbeat
-  FROM nodes
-    ORDER BY id
-`
-	err := query.SelectObjects(c.tx, dest, stmt)
+SELECT id, name, address, description, schema, api_extensions, heartbeat FROM nodes `
+	if where != "" {
+		stmt += fmt.Sprintf("WHERE %s ", where)
+	}
+	stmt += "ORDER BY id"
+	err := query.SelectObjects(c.tx, dest, stmt, args...)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to fecth nodes")
 	}
diff --git a/lxd/db/node_test.go b/lxd/db/node_test.go
index c213580ae..f59a08d1f 100644
--- a/lxd/db/node_test.go
+++ b/lxd/db/node_test.go
@@ -24,7 +24,8 @@ func TestNodeAdd(t *testing.T) {
 	require.NoError(t, err)
 	require.Len(t, nodes, 2)
 
-	node := nodes[1]
+	node, err := tx.Node("1.2.3.4:666")
+	require.NoError(t, err)
 	assert.Equal(t, "buzz", node.Name)
 	assert.Equal(t, "1.2.3.4:666", node.Address)
 	assert.Equal(t, cluster.SchemaVersion, node.Schema)
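
Node() distinguishes the not-found case from genuine database errors, so call
sites can branch on it. A hedged usage sketch:

    node, err := tx.Node("1.2.3.4:666")
    if err == db.NoSuchObjectError {
        // No node is registered with that address.
    } else if err != nil {
        return err // real database error
    }
    fmt.Println(node.Name, node.Address)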

From e40e44575e6914a0beb8c312e7f530743321f28a Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 24 Oct 2017 20:07:23 +0000
Subject: [PATCH 055/227] Add storage-related tables to cluster db

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/cluster/schema.go      | 36 +++++++++++++++++++++++++++++++
 lxd/db/cluster/update.go      | 36 +++++++++++++++++++++++++++++++
 lxd/db/cluster/update_test.go | 50 +++++++++++++++++++++++++++++++------------
 3 files changed, 108 insertions(+), 14 deletions(-)

diff --git a/lxd/db/cluster/schema.go b/lxd/db/cluster/schema.go
index f003934b9..eea04804a 100644
--- a/lxd/db/cluster/schema.go
+++ b/lxd/db/cluster/schema.go
@@ -48,6 +48,42 @@ CREATE TABLE nodes (
     UNIQUE (name),
     UNIQUE (address)
 );
+CREATE TABLE storage_pools (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    name TEXT NOT NULL,
+    driver TEXT NOT NULL,
+    description TEXT,
+    UNIQUE (name)
+);
+CREATE TABLE storage_pools_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    storage_pool_id INTEGER NOT NULL,
+    node_id INTEGER,
+    key TEXT NOT NULL,
+    value TEXT,
+    UNIQUE (storage_pool_id, node_id, key),
+    FOREIGN KEY (storage_pool_id) REFERENCES storage_pools (id) ON DELETE CASCADE,
+    FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
+);
+CREATE TABLE storage_volumes (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    name TEXT NOT NULL,
+    storage_pool_id INTEGER NOT NULL,
+    type INTEGER NOT NULL,
+    description TEXT,
+    UNIQUE (storage_pool_id, name, type),
+    FOREIGN KEY (storage_pool_id) REFERENCES storage_pools (id) ON DELETE CASCADE
+);
+CREATE TABLE storage_volumes_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    storage_volume_id INTEGER NOT NULL,
+    node_id INTEGER NOT NULL,
+    key TEXT NOT NULL,
+    value TEXT,
+    UNIQUE (storage_volume_id, node_id, key),
+    FOREIGN KEY (storage_volume_id) REFERENCES storage_volumes (id) ON DELETE CASCADE,
+    FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
+);
 
 INSERT INTO schema (version, updated_at) VALUES (2, strftime("%s"))
 `
diff --git a/lxd/db/cluster/update.go b/lxd/db/cluster/update.go
index a1e5c116d..2ed8a5a6a 100644
--- a/lxd/db/cluster/update.go
+++ b/lxd/db/cluster/update.go
@@ -61,6 +61,42 @@ CREATE TABLE networks_config (
     FOREIGN KEY (network_id) REFERENCES networks (id) ON DELETE CASCADE,
     FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
 );
+CREATE TABLE storage_pools (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    name TEXT NOT NULL,
+    driver TEXT NOT NULL,
+    description TEXT,
+    UNIQUE (name)
+);
+CREATE TABLE storage_pools_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    storage_pool_id INTEGER NOT NULL,
+    node_id INTEGER,
+    key TEXT NOT NULL,
+    value TEXT,
+    UNIQUE (storage_pool_id, node_id, key),
+    FOREIGN KEY (storage_pool_id) REFERENCES storage_pools (id) ON DELETE CASCADE,
+    FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
+);
+CREATE TABLE storage_volumes (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    name TEXT NOT NULL,
+    storage_pool_id INTEGER NOT NULL,
+    type INTEGER NOT NULL,
+    description TEXT,
+    UNIQUE (storage_pool_id, name, type),
+    FOREIGN KEY (storage_pool_id) REFERENCES storage_pools (id) ON DELETE CASCADE
+);
+CREATE TABLE storage_volumes_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    storage_volume_id INTEGER NOT NULL,
+    node_id INTEGER NOT NULL,
+    key TEXT NOT NULL,
+    value TEXT,
+    UNIQUE (storage_volume_id, node_id, key),
+    FOREIGN KEY (storage_volume_id) REFERENCES storage_volumes (id) ON DELETE CASCADE,
+    FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
+);
 `
 	_, err := tx.Exec(stmt)
 	return err
diff --git a/lxd/db/cluster/update_test.go b/lxd/db/cluster/update_test.go
index 97d94898a..76ade6df2 100644
--- a/lxd/db/cluster/update_test.go
+++ b/lxd/db/cluster/update_test.go
@@ -1,6 +1,8 @@
 package cluster_test
 
 import (
+	"database/sql"
+	"fmt"
 	"testing"
 	"time"
 
@@ -55,7 +57,24 @@ func TestUpdateFromV1_Network(t *testing.T) {
 	require.Error(t, err)
 }
 
-func TestUpdateFromV1_NetworkConfig(t *testing.T) {
+func TestUpdateFromV1_ConfigTables(t *testing.T) {
+	testConfigTable(t, "networks", func(db *sql.DB) {
+		_, err := db.Exec("INSERT INTO networks VALUES (1, 'foo', 'blah', 1)")
+		require.NoError(t, err)
+	})
+	testConfigTable(t, "storage_pools", func(db *sql.DB) {
+		_, err := db.Exec("INSERT INTO storage_pools VALUES (1, 'default', 'dir', '')")
+		require.NoError(t, err)
+	})
+	testConfigTable(t, "storage_volumes", func(db *sql.DB) {
+		_, err := db.Exec("INSERT INTO storage_pools VALUES (1, 'default', 'dir', '')")
+		require.NoError(t, err)
+		_, err = db.Exec("INSERT INTO storage_volumes VALUES (1, 'dev', 1, 1, '')")
+		require.NoError(t, err)
+	})
+}
+
+func testConfigTable(t *testing.T, table string, setup func(db *sql.DB)) {
 	schema := cluster.Schema()
 	db, err := schema.ExerciseUpdate(2, nil)
 	require.NoError(t, err)
@@ -66,24 +85,27 @@ func TestUpdateFromV1_NetworkConfig(t *testing.T) {
 	_, err = db.Exec("INSERT INTO nodes VALUES (2, 'two', '', '2.2.2.2', 666, 999, ?)", time.Now())
 	require.NoError(t, err)
 
-	_, err = db.Exec("INSERT INTO networks VALUES (1, 'foo', 'blah', 1)")
-	require.NoError(t, err)
+	stmt := func(format string) string {
+		return fmt.Sprintf(format, table)
+	}
+
+	setup(db)
 
-	_, err = db.Exec("INSERT INTO networks_config VALUES (1, 1, 1, 'bar', 'baz')")
+	_, err = db.Exec(stmt("INSERT INTO %s_config VALUES (1, 1, 1, 'bar', 'baz')"))
 	require.NoError(t, err)
 
-	// Unique constraint on network_id/node_id/key.
-	_, err = db.Exec("INSERT INTO networks_config VALUES (2, 1, 1, 'bar', 'egg')")
+	// Unique constraint on <entity>_id/node_id/key.
+	_, err = db.Exec(stmt("INSERT INTO %s_config VALUES (2, 1, 1, 'bar', 'egg')"))
 	require.Error(t, err)
-	_, err = db.Exec("INSERT INTO networks_config VALUES (3, 1, 2, 'bar', 'egg')")
+	_, err = db.Exec(stmt("INSERT INTO %s_config VALUES (3, 1, 2, 'bar', 'egg')"))
 	require.NoError(t, err)
 
-	// Reference constraint on network_id.
-	_, err = db.Exec("INSERT INTO networks_config VALUES (4, 2, 1, 'fuz', 'buz')")
+	// Reference constraint on <entity>_id.
+	_, err = db.Exec(stmt("INSERT INTO %s_config VALUES (4, 2, 1, 'fuz', 'buz')"))
 	require.Error(t, err)
 
 	// Reference constraint on node_id.
-	_, err = db.Exec("INSERT INTO networks_config VALUES (5, 1, 3, 'fuz', 'buz')")
+	_, err = db.Exec(stmt("INSERT INTO %s_config VALUES (5, 1, 3, 'fuz', 'buz')"))
 	require.Error(t, err)
 
 	// Cascade deletes on node_id
@@ -92,19 +114,19 @@ func TestUpdateFromV1_NetworkConfig(t *testing.T) {
 	n, err := result.RowsAffected()
 	require.NoError(t, err)
 	assert.Equal(t, int64(1), n)
-	result, err = db.Exec("UPDATE networks_config SET value='yuk'")
+	result, err = db.Exec(stmt("UPDATE %s_config SET value='yuk'"))
 	require.NoError(t, err)
 	n, err = result.RowsAffected()
 	require.NoError(t, err)
 	assert.Equal(t, int64(1), n) // Only one row was affected, since the other got deleted
 
-	// Cascade deletes on network_id
-	result, err = db.Exec("DELETE FROM networks")
+	// Cascade deletes on <entity>_id
+	result, err = db.Exec(stmt("DELETE FROM %s"))
 	require.NoError(t, err)
 	n, err = result.RowsAffected()
 	require.NoError(t, err)
 	assert.Equal(t, int64(1), n)
-	result, err = db.Exec("DELETE FROM networks_config")
+	result, err = db.Exec(stmt("DELETE FROM %s_config"))
 	require.NoError(t, err)
 	n, err = result.RowsAffected()
 	require.NoError(t, err)
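
The constraint matrix exercised by testConfigTable is the same for all three
entities. For storage_pools_config, for example, the generated statements
boil down to (sketch):

    _, err = db.Exec("INSERT INTO storage_pools_config VALUES (1, 1, 1, 'bar', 'baz')")
    // Same (pool, node, key) again -> unique constraint violation.
    _, err = db.Exec("INSERT INTO storage_pools_config VALUES (2, 1, 1, 'bar', 'egg')")
    // Same key for another node is allowed.
    _, err = db.Exec("INSERT INTO storage_pools_config VALUES (3, 1, 2, 'bar', 'egg')")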

From d1119bcb80c7f5842bb3806865444bd23e9e9663 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 26 Oct 2017 09:47:49 +0000
Subject: [PATCH 056/227] Add internal "lxd sql" command to run arbitrary SQL
 queries

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_internal.go      | 69 ++++++++++++++++++++++++++++++++++++++++
 lxd/db/db.go             |  8 +++++
 lxd/db/db_export_test.go |  9 ------
 lxd/main.go              |  1 +
 lxd/main_sql.go          | 83 ++++++++++++++++++++++++++++++++++++++++++++++++
 5 files changed, 161 insertions(+), 9 deletions(-)
 delete mode 100644 lxd/db/db_export_test.go
 create mode 100644 lxd/main_sql.go

diff --git a/lxd/api_internal.go b/lxd/api_internal.go
index b5a568d8d..eaea3a906 100644
--- a/lxd/api_internal.go
+++ b/lxd/api_internal.go
@@ -28,6 +28,7 @@ var apiInternal = []Command{
 	internalContainerOnStartCmd,
 	internalContainerOnStopCmd,
 	internalContainersCmd,
+	internalSQLCmd,
 }
 
 func internalReady(d *Daemon, r *http.Request) Response {
@@ -91,10 +92,78 @@ func internalContainerOnStop(d *Daemon, r *http.Request) Response {
 	return EmptySyncResponse
 }
 
+type internalSQLPost struct {
+	Query string `json:"query" yaml:"query"`
+}
+
+type internalSQLResult struct {
+	Columns      []string        `json:"columns" yaml:"columns"`
+	Rows         [][]interface{} `json:"rows" yaml:"rows"`
+	RowsAffected int64           `json:"rows_affected" yaml:"rows_affected"`
+}
+
+func internalSQL(d *Daemon, r *http.Request) Response {
+	req := &internalSQLPost{}
+	// Parse the request.
+	err := json.NewDecoder(r.Body).Decode(&req)
+	if err != nil {
+		return BadRequest(err)
+	}
+	db := d.cluster.DB()
+	result := internalSQLResult{}
+	if strings.HasPrefix(req.Query, "SELECT") {
+		rows, err := db.Query(req.Query)
+		if err != nil {
+			return SmartError(err)
+		}
+		defer rows.Close()
+		result.Columns, err = rows.Columns()
+		if err != nil {
+			return SmartError(err)
+		}
+		for rows.Next() {
+			row := make([]interface{}, len(result.Columns))
+			rowPointers := make([]interface{}, len(result.Columns))
+			for i := range row {
+				rowPointers[i] = &row[i]
+			}
+			err := rows.Scan(rowPointers...)
+			if err != nil {
+				return SmartError(err)
+			}
+			for i, column := range row {
+				// Convert bytes to string. This is safe as
+				// long as we don't have any BLOB column type.
+				data, ok := column.([]byte)
+				if ok {
+					row[i] = string(data)
+				}
+			}
+			result.Rows = append(result.Rows, row)
+		}
+		err = rows.Err()
+		if err != nil {
+			return SmartError(err)
+		}
+	} else {
+		r, err := db.Exec(req.Query)
+		if err != nil {
+			return SmartError(err)
+		}
+		result.RowsAffected, err = r.RowsAffected()
+		if err != nil {
+			return SmartError(err)
+		}
+
+	}
+	return SyncResponse(true, result)
+}
+
 var internalShutdownCmd = Command{name: "shutdown", put: internalShutdown}
 var internalReadyCmd = Command{name: "ready", put: internalReady, get: internalWaitReady}
 var internalContainerOnStartCmd = Command{name: "containers/{id}/onstart", get: internalContainerOnStart}
 var internalContainerOnStopCmd = Command{name: "containers/{id}/onstop", get: internalContainerOnStop}
+var internalSQLCmd = Command{name: "sql", post: internalSQL}
 
 func slurpBackupFile(path string) (*backupFile, error) {
 	data, err := ioutil.ReadFile(path)
diff --git a/lxd/db/db.go b/lxd/db/db.go
index 4f3a95a34..76e20ed10 100644
--- a/lxd/db/db.go
+++ b/lxd/db/db.go
@@ -244,6 +244,14 @@ func (c *Cluster) Close() error {
 	return c.db.Close()
 }
 
+// DB returns the low level database handle to the cluster database.
+//
+// FIXME: this is used for compatibility with some legacy code, and should be
+//        dropped once there are no call sites left.
+func (c *Cluster) DB() *sql.DB {
+	return c.db
+}
+
 // UpdateSchemasDotGo updates the schema.go files in the local/ and cluster/
 // sub-packages.
 func UpdateSchemasDotGo() error {
diff --git a/lxd/db/db_export_test.go b/lxd/db/db_export_test.go
deleted file mode 100644
index a975c9081..000000000
--- a/lxd/db/db_export_test.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package db
-
-import "database/sql"
-
-// DB returns the low level database handle to the cluster gRPC SQL database
-// handler. Used by tests for introspecing the database with raw SQL.
-func (c *Cluster) DB() *sql.DB {
-	return c.db
-}
diff --git a/lxd/main.go b/lxd/main.go
index e885088c0..a257f8fc3 100644
--- a/lxd/main.go
+++ b/lxd/main.go
@@ -66,4 +66,5 @@ var subcommands = map[string]SubCommand{
 	"netcat":             cmdNetcat,
 	"migratedumpsuccess": cmdMigrateDumpSuccess,
 	"forkproxy":          cmdProxyDevStart,
+	"sql":                cmdSQL,
 }
diff --git a/lxd/main_sql.go b/lxd/main_sql.go
new file mode 100644
index 000000000..e721633bb
--- /dev/null
+++ b/lxd/main_sql.go
@@ -0,0 +1,83 @@
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+
+	lxd "github.com/lxc/lxd/client"
+)
+
+func cmdSQL(args *Args) error {
+	if len(args.Params) != 1 {
+		return fmt.Errorf("Invalid arguments")
+	}
+	query := args.Params[0]
+
+	// Connect to LXD
+	c, err := lxd.ConnectLXDUnix("", nil)
+	if err != nil {
+		return err
+	}
+
+	data := internalSQLPost{
+		Query: query,
+	}
+	response, _, err := c.RawQuery("POST", "/internal/sql", data, "")
+	if err != nil {
+		return err
+	}
+
+	result := internalSQLResult{}
+	err = json.Unmarshal(response.Metadata, &result)
+	if err != nil {
+		return err
+	}
+	if strings.HasPrefix(query, "SELECT") {
+		// Print results in tabular format
+		widths := make([]int, len(result.Columns))
+		for i, column := range result.Columns {
+			widths[i] = len(column)
+		}
+		for _, row := range result.Rows {
+			for i, v := range row {
+				width := 10
+				switch v := v.(type) {
+				case string:
+					width = len(v)
+				case int:
+					width = 6
+				case int64:
+					width = 6
+				case time.Time:
+					width = 12
+				}
+				if width > widths[i] {
+					widths[i] = width
+				}
+			}
+		}
+		format := "|"
+		separator := "+"
+		columns := make([]interface{}, len(result.Columns))
+		for i, column := range result.Columns {
+			format += " %-" + strconv.Itoa(widths[i]) + "v |"
+			columns[i] = column
+			separator += strings.Repeat("-", widths[i]+2) + "+"
+		}
+		format += "\n"
+		separator += "\n"
+		fmt.Printf(separator)
+		fmt.Printf(format, columns...)
+		fmt.Printf(separator)
+		for _, row := range result.Rows {
+			fmt.Printf(format, row...)
+		}
+		fmt.Printf(separator)
+	} else {
+		fmt.Printf("Rows affected: %d\n", result.RowsAffected)
+	}
+	return nil
+}
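
For manual debugging this gives, for example (output shape per the tabular
formatting above, values illustrative):

    lxd sql "SELECT id, name, address FROM nodes"
    +--------+------+-------------+
    | id     | name | address     |
    +--------+------+-------------+
    | 1      | buzz | 1.2.3.4:666 |
    +--------+------+-------------+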

From 34a4fbd169f5cb2c1319e5261149f0ce8460af44 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 25 Oct 2017 07:18:49 +0000
Subject: [PATCH 057/227] Use cluster database for storage-related data

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_internal.go            |  12 +--
 lxd/container.go               |   4 +-
 lxd/container_lxc.go           |  24 +++---
 lxd/containers_post.go         |   6 +-
 lxd/daemon_images.go           |   4 +-
 lxd/db/containers.go           |   4 +-
 lxd/db/images.go               |   4 +-
 lxd/db/migration.go            |  31 ++++++-
 lxd/db/migration_test.go       |  19 +++++
 lxd/db/node/schema.go          |  32 --------
 lxd/db/node/update.go          |   4 +
 lxd/db/storage_pools.go        | 122 ++++++++++++++--------------
 lxd/db/storage_volumes.go      |  41 ++++++----
 lxd/images.go                  |   6 +-
 lxd/main_test.go               |   2 +-
 lxd/patches.go                 | 178 +++++++++++++++++++++--------------------
 lxd/profiles.go                |   2 +-
 lxd/profiles_utils.go          |   2 +-
 lxd/storage.go                 |  24 +++---
 lxd/storage_btrfs.go           |   4 +-
 lxd/storage_ceph.go            |   8 +-
 lxd/storage_dir.go             |   4 +-
 lxd/storage_lvm.go             |   8 +-
 lxd/storage_lvm_utils.go       |   2 +-
 lxd/storage_pools.go           |  20 ++---
 lxd/storage_pools_utils.go     |  20 ++---
 lxd/storage_shared.go          |   8 +-
 lxd/storage_utils.go           |   2 +-
 lxd/storage_volumes.go         |  26 +++---
 lxd/storage_volumes_utils.go   |  12 +--
 lxd/storage_zfs.go             |   4 +-
 test/includes/lxd.sh           |   8 +-
 test/suites/backup.sh          |  10 +--
 test/suites/database_update.sh |   6 +-
 34 files changed, 346 insertions(+), 317 deletions(-)

diff --git a/lxd/api_internal.go b/lxd/api_internal.go
index eaea3a906..07d8e9ec3 100644
--- a/lxd/api_internal.go
+++ b/lxd/api_internal.go
@@ -257,7 +257,7 @@ func internalImport(d *Daemon, r *http.Request) Response {
 
 	// Try to retrieve the storage pool the container supposedly lives on.
 	var poolErr error
-	poolID, pool, poolErr := d.db.StoragePoolGet(containerPoolName)
+	poolID, pool, poolErr := d.cluster.StoragePoolGet(containerPoolName)
 	if poolErr != nil {
 		if poolErr != db.NoSuchObjectError {
 			return SmartError(poolErr)
@@ -279,7 +279,7 @@ func internalImport(d *Daemon, r *http.Request) Response {
 			return SmartError(err)
 		}
 
-		poolID, err = d.db.StoragePoolGetID(containerPoolName)
+		poolID, err = d.cluster.StoragePoolGetID(containerPoolName)
 		if err != nil {
 			return SmartError(err)
 		}
@@ -574,7 +574,7 @@ func internalImport(d *Daemon, r *http.Request) Response {
 	}
 
 	// Check if a storage volume entry for the container already exists.
-	_, volume, ctVolErr := d.db.StoragePoolVolumeGetType(
+	_, volume, ctVolErr := d.cluster.StoragePoolVolumeGetType(
 		req.Name, storagePoolVolumeTypeContainer, poolID)
 	if ctVolErr != nil {
 		if ctVolErr != db.NoSuchObjectError {
@@ -624,7 +624,7 @@ func internalImport(d *Daemon, r *http.Request) Response {
 
 		// Remove the storage volume db entry for the container since
 		// force was specified.
-		err := d.db.StoragePoolVolumeDelete(req.Name,
+		err := d.cluster.StoragePoolVolumeDelete(req.Name,
 			storagePoolVolumeTypeContainer, poolID)
 		if err != nil {
 			return SmartError(err)
@@ -657,7 +657,7 @@ func internalImport(d *Daemon, r *http.Request) Response {
 		}
 
 		// Check if a storage volume entry for the snapshot already exists.
-		_, _, csVolErr := d.db.StoragePoolVolumeGetType(snap.Name,
+		_, _, csVolErr := d.cluster.StoragePoolVolumeGetType(snap.Name,
 			storagePoolVolumeTypeContainer, poolID)
 		if csVolErr != nil {
 			if csVolErr != db.NoSuchObjectError {
@@ -680,7 +680,7 @@ func internalImport(d *Daemon, r *http.Request) Response {
 		}
 
 		if csVolErr == nil {
-			err := d.db.StoragePoolVolumeDelete(snap.Name,
+			err := d.cluster.StoragePoolVolumeDelete(snap.Name,
 				storagePoolVolumeTypeContainer, poolID)
 			if err != nil {
 				return SmartError(err)
diff --git a/lxd/container.go b/lxd/container.go
index 1a75e0a67..ca64aa952 100644
--- a/lxd/container.go
+++ b/lxd/container.go
@@ -307,7 +307,7 @@ func containerGetRootDiskDevice(devices types.Devices) (string, types.Device, er
 	return "", types.Device{}, fmt.Errorf("No root device could be found.")
 }
 
-func containerValidDevices(db *db.Node, devices types.Devices, profile bool, expanded bool) error {
+func containerValidDevices(db *db.Cluster, devices types.Devices, profile bool, expanded bool) error {
 	// Empty device list
 	if devices == nil {
 		return nil
@@ -836,7 +836,7 @@ func containerCreateInternal(s *state.State, args db.ContainerArgs) (container,
 	}
 
 	// Validate container devices
-	err = containerValidDevices(s.Node, args.Devices, false, false)
+	err = containerValidDevices(s.Cluster, args.Devices, false, false)
 	if err != nil {
 		return nil, err
 	}
diff --git a/lxd/container_lxc.go b/lxd/container_lxc.go
index adb5856a7..f70b60efa 100644
--- a/lxd/container_lxc.go
+++ b/lxd/container_lxc.go
@@ -307,7 +307,7 @@ func containerLXCCreate(s *state.State, args db.ContainerArgs) (container, error
 		return nil, err
 	}
 
-	err = containerValidDevices(s.Node, c.expandedDevices, false, true)
+	err = containerValidDevices(s.Cluster, c.expandedDevices, false, true)
 	if err != nil {
 		c.Delete()
 		logger.Error("Failed creating container", ctxMap)
@@ -329,7 +329,7 @@ func containerLXCCreate(s *state.State, args db.ContainerArgs) (container, error
 	storagePool := rootDiskDevice["pool"]
 
 	// Get the storage pool ID for the container
-	poolID, pool, err := s.Node.StoragePoolGet(storagePool)
+	poolID, pool, err := s.Cluster.StoragePoolGet(storagePool)
 	if err != nil {
 		c.Delete()
 		return nil, err
@@ -343,7 +343,7 @@ func containerLXCCreate(s *state.State, args db.ContainerArgs) (container, error
 	}
 
 	// Create a new database entry for the container's storage volume
-	_, err = s.Node.StoragePoolVolumeCreate(args.Name, "", storagePoolVolumeTypeContainer, poolID, volumeConfig)
+	_, err = s.Cluster.StoragePoolVolumeCreate(args.Name, "", storagePoolVolumeTypeContainer, poolID, volumeConfig)
 	if err != nil {
 		c.Delete()
 		return nil, err
@@ -353,7 +353,7 @@ func containerLXCCreate(s *state.State, args db.ContainerArgs) (container, error
 	cStorage, err := storagePoolVolumeContainerCreateInit(s, storagePool, args.Name)
 	if err != nil {
 		c.Delete()
-		s.Node.StoragePoolVolumeDelete(args.Name, storagePoolVolumeTypeContainer, poolID)
+		s.Cluster.StoragePoolVolumeDelete(args.Name, storagePoolVolumeTypeContainer, poolID)
 		logger.Error("Failed to initialize container storage", ctxMap)
 		return nil, err
 	}
@@ -3132,7 +3132,7 @@ func (c *containerLXC) Delete() error {
 		poolID, _, _ := c.storage.GetContainerPoolInfo()
 
 		// Remove volume from storage pool.
-		err := c.db.StoragePoolVolumeDelete(c.Name(), storagePoolVolumeTypeContainer, poolID)
+		err := c.state.Cluster.StoragePoolVolumeDelete(c.Name(), storagePoolVolumeTypeContainer, poolID)
 		if err != nil {
 			return err
 		}
@@ -3226,7 +3226,7 @@ func (c *containerLXC) Rename(newName string) error {
 
 	// Rename storage volume for the container.
 	poolID, _, _ := c.storage.GetContainerPoolInfo()
-	err = c.db.StoragePoolVolumeRename(oldName, newName, storagePoolVolumeTypeContainer, poolID)
+	err = c.state.Cluster.StoragePoolVolumeRename(oldName, newName, storagePoolVolumeTypeContainer, poolID)
 	if err != nil {
 		logger.Error("Failed renaming storage volume", ctxMap)
 		return err
@@ -3251,7 +3251,7 @@ func (c *containerLXC) Rename(newName string) error {
 			}
 
 			// Rename storage volume for the snapshot.
-			err = c.db.StoragePoolVolumeRename(sname, newSnapshotName, storagePoolVolumeTypeContainer, poolID)
+			err = c.state.Cluster.StoragePoolVolumeRename(sname, newSnapshotName, storagePoolVolumeTypeContainer, poolID)
 			if err != nil {
 				logger.Error("Failed renaming storage volume", ctxMap)
 				return err
@@ -3380,12 +3380,12 @@ func writeBackupFile(c container) error {
 	}
 
 	s := c.DaemonState()
-	poolID, pool, err := s.Node.StoragePoolGet(poolName)
+	poolID, pool, err := s.Cluster.StoragePoolGet(poolName)
 	if err != nil {
 		return err
 	}
 
-	_, volume, err := s.Node.StoragePoolVolumeGetType(c.Name(), storagePoolVolumeTypeContainer, poolID)
+	_, volume, err := s.Cluster.StoragePoolVolumeGetType(c.Name(), storagePoolVolumeTypeContainer, poolID)
 	if err != nil {
 		return err
 	}
@@ -3444,7 +3444,7 @@ func (c *containerLXC) Update(args db.ContainerArgs, userRequested bool) error {
 	}
 
 	// Validate the new devices
-	err = containerValidDevices(c.db, args.Devices, false, false)
+	err = containerValidDevices(c.state.Cluster, args.Devices, false, false)
 	if err != nil {
 		return err
 	}
@@ -3605,7 +3605,7 @@ func (c *containerLXC) Update(args db.ContainerArgs, userRequested bool) error {
 	}
 
 	// Do some validation of the devices diff
-	err = containerValidDevices(c.db, c.expandedDevices, false, true)
+	err = containerValidDevices(c.state.Cluster, c.expandedDevices, false, true)
 	if err != nil {
 		return err
 	}
@@ -7976,7 +7976,7 @@ func (c *containerLXC) StatePath() string {
 }
 
 func (c *containerLXC) StoragePool() (string, error) {
-	poolName, err := c.db.ContainerPool(c.Name())
+	poolName, err := c.state.Cluster.ContainerPool(c.Name())
 	if err != nil {
 		return "", err
 	}
diff --git a/lxd/containers_post.go b/lxd/containers_post.go
index 5ac3e0b34..60e120453 100644
--- a/lxd/containers_post.go
+++ b/lxd/containers_post.go
@@ -206,7 +206,7 @@ func createFromMigration(d *Daemon, req *api.ContainersPost) Response {
 
 	// Handle copying/moving between two storage-api LXD instances.
 	if storagePool != "" {
-		_, err := d.db.StoragePoolGetID(storagePool)
+		_, err := d.cluster.StoragePoolGetID(storagePool)
 		if err == db.NoSuchObjectError {
 			storagePool = ""
 			// Unset the local root disk device storage pool if not
@@ -235,7 +235,7 @@ func createFromMigration(d *Daemon, req *api.ContainersPost) Response {
 	logger.Debugf("No valid storage pool in the container's local root disk device and profiles found.")
 	// If there is just a single pool in the database, use that
 	if storagePool == "" {
-		pools, err := d.db.StoragePools()
+		pools, err := d.cluster.StoragePools()
 		if err != nil {
 			if err == db.NoSuchObjectError {
 				return BadRequest(fmt.Errorf("This LXD instance does not have any storage pools configured."))
@@ -524,7 +524,7 @@ func containersPost(d *Daemon, r *http.Request) Response {
 	}
 
 	// If no storage pool is found, error out.
-	pools, err := d.db.StoragePools()
+	pools, err := d.cluster.StoragePools()
 	if err != nil || len(pools) == 0 {
 		return BadRequest(fmt.Errorf("No storage pool found. Please create a new storage pool."))
 	}
diff --git a/lxd/daemon_images.go b/lxd/daemon_images.go
index e4b6aaf44..853c8b1fc 100644
--- a/lxd/daemon_images.go
+++ b/lxd/daemon_images.go
@@ -255,13 +255,13 @@ func (d *Daemon) ImageDownload(op *operation, server string, protocol string, ce
 		}
 
 		// Get the ID of the target storage pool
-		poolID, err := d.db.StoragePoolGetID(storagePool)
+		poolID, err := d.cluster.StoragePoolGetID(storagePool)
 		if err != nil {
 			return nil, err
 		}
 
 		// Check if the image is already in the pool
-		poolIDs, err := d.db.ImageGetPools(info.Fingerprint)
+		poolIDs, err := d.cluster.ImageGetPools(info.Fingerprint)
 		if err != nil {
 			return nil, err
 		}
diff --git a/lxd/db/containers.go b/lxd/db/containers.go
index fa74d13a8..e51df08d3 100644
--- a/lxd/db/containers.go
+++ b/lxd/db/containers.go
@@ -509,7 +509,7 @@ func (n *Node) ContainerNextSnapshot(name string) int {
 }
 
 // Get the storage pool of a given container.
-func (n *Node) ContainerPool(containerName string) (string, error) {
+func (c *Cluster) ContainerPool(containerName string) (string, error) {
 	// Get container storage volume. Since container names are globally
 	// unique, and their storage volumes carry the same name, their storage
 	// volumes are unique too.
@@ -520,7 +520,7 @@ WHERE storage_volumes.name=? AND storage_volumes.type=?`
 	inargs := []interface{}{containerName, StoragePoolVolumeTypeContainer}
 	outargs := []interface{}{&poolName}
 
-	err := dbQueryRowScan(n.db, query, inargs, outargs)
+	err := dbQueryRowScan(c.db, query, inargs, outargs)
 	if err != nil {
 		if err == sql.ErrNoRows {
 			return "", NoSuchObjectError
diff --git a/lxd/db/images.go b/lxd/db/images.go
index 7e63f8b07..244d5777d 100644
--- a/lxd/db/images.go
+++ b/lxd/db/images.go
@@ -530,13 +530,13 @@ func (n *Node) ImageInsert(fp string, fname string, sz int64, public bool, autoU
 }
 
 // Get the names of all storage pools on which a given image exists.
-func (n *Node) ImageGetPools(imageFingerprint string) ([]int64, error) {
+func (c *Cluster) ImageGetPools(imageFingerprint string) ([]int64, error) {
 	poolID := int64(-1)
 	query := "SELECT storage_pool_id FROM storage_volumes WHERE name=? AND type=?"
 	inargs := []interface{}{imageFingerprint, StoragePoolVolumeTypeImage}
 	outargs := []interface{}{poolID}
 
-	result, err := queryScan(n.db, query, inargs, outargs)
+	result, err := queryScan(c.db, query, inargs, outargs)
 	if err != nil {
 		return []int64{}, err
 	}
diff --git a/lxd/db/migration.go b/lxd/db/migration.go
index 084c14b85..f9d2a7f64 100644
--- a/lxd/db/migration.go
+++ b/lxd/db/migration.go
@@ -76,10 +76,33 @@ func (c *Cluster) ImportPreClusteringData(dump *Dump) error {
 				}
 			}
 			columns := dump.Schema[table]
+
+			nullNodeID := false // Whether config-related rows should have a NULL node ID
+			appendNodeID := func() {
+				columns = append(columns, "node_id")
+				if nullNodeID {
+					row = append(row, nil)
+				} else {
+					row = append(row, int64(1))
+				}
+			}
+
 			switch table {
 			case "networks_config":
-				columns = append(columns, "node_id")
-				row = append(row, int64(1))
+				appendNodeID()
+			case "storage_pools_config":
+				// The "source" config key is the only one
+				// which is not global to the cluster, so all
+				// other keys will have a NULL node_id.
+				for i, column := range columns {
+					if column == "key" && row[i] != "source" {
+						nullNodeID = true
+						break
+					}
+				}
+				appendNodeID()
+			case "storage_volumes_config":
+				appendNodeID()
 			}
 			stmt := fmt.Sprintf("INSERT INTO %s(%s)", table, strings.Join(columns, ", "))
 			stmt += fmt.Sprintf(" VALUES %s", query.Params(len(columns)))
@@ -115,4 +138,8 @@ var preClusteringTables = []string{
 	"config",
 	"networks",
 	"networks_config",
+	"storage_pools",
+	"storage_pools_config",
+	"storage_volumes",
+	"storage_volumes_config",
 }
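
Concretely, given the dump rows used in the test below, the import produces
(sketch; the trailing column is node_id):

    // storage_pools_config in the dump:      imported as:
    //   (1, 1, 'source', '/foo/bar')   ->    (1, 1, 'source', '/foo/bar', 1)     node-specific
    //   (2, 1, 'size',   '123')        ->    (2, 1, 'size',   '123',      NULL)  cluster-wide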
diff --git a/lxd/db/migration_test.go b/lxd/db/migration_test.go
index ac201ace4..b590907b4 100644
--- a/lxd/db/migration_test.go
+++ b/lxd/db/migration_test.go
@@ -58,6 +58,20 @@ func TestImportPreClusteringData(t *testing.T) {
 	require.NoError(t, err)
 	assert.Equal(t, int64(1), id)
 	assert.Equal(t, "true", network.Config["ipv4.nat"])
+
+	// storage
+	pools, err := cluster.StoragePools()
+	require.NoError(t, err)
+	assert.Equal(t, []string{"default"}, pools)
+	id, pool, err := cluster.StoragePoolGet("default")
+	require.NoError(t, err)
+	assert.Equal(t, int64(1), id)
+	assert.Equal(t, "/foo/bar", pool.Config["source"])
+	assert.Equal(t, "123", pool.Config["size"])
+	volumes, err := cluster.StoragePoolVolumesGet(id, []int{1})
+	require.NoError(t, err)
+	assert.Len(t, volumes, 1)
+	assert.Equal(t, "/foo/bar", volumes[0].Config["source"])
 }
 
 // Return a sql.Tx against a memory database populated with pre-clustering
@@ -74,6 +88,11 @@ func newPreClusteringTx(t *testing.T) *sql.Tx {
 		"INSERT INTO config VALUES(1, 'core.https_address', '1.2.3.4:666')",
 		"INSERT INTO networks VALUES(1, 'lxcbr0', 'LXD bridge')",
 		"INSERT INTO networks_config VALUES(1, 1, 'ipv4.nat', 'true')",
+		"INSERT INTO storage_pools VALUES (1, 'default', 'dir', '')",
+		"INSERT INTO storage_pools_config VALUES(1, 1, 'source', '/foo/bar')",
+		"INSERT INTO storage_pools_config VALUES(2, 1, 'size', '123')",
+		"INSERT INTO storage_volumes VALUES (1, 'dev', 1, 1, '')",
+		"INSERT INTO storage_volumes_config VALUES(1, 1, 'source', '/foo/bar')",
 	}
 	for _, stmt := range stmts {
 		_, err := tx.Exec(stmt)
diff --git a/lxd/db/node/schema.go b/lxd/db/node/schema.go
index 7a0511f92..c8105de7f 100644
--- a/lxd/db/node/schema.go
+++ b/lxd/db/node/schema.go
@@ -146,38 +146,6 @@ CREATE TABLE raft_nodes (
     address TEXT NOT NULL,
     UNIQUE (address)
 );
-CREATE TABLE storage_pools (
-    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    name VARCHAR(255) NOT NULL,
-    driver VARCHAR(255) NOT NULL,
-    description TEXT,
-    UNIQUE (name)
-);
-CREATE TABLE storage_pools_config (
-    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    storage_pool_id INTEGER NOT NULL,
-    key VARCHAR(255) NOT NULL,
-    value TEXT,
-    UNIQUE (storage_pool_id, key),
-    FOREIGN KEY (storage_pool_id) REFERENCES storage_pools (id) ON DELETE CASCADE
-);
-CREATE TABLE storage_volumes (
-    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    name VARCHAR(255) NOT NULL,
-    storage_pool_id INTEGER NOT NULL,
-    type INTEGER NOT NULL,
-    description TEXT,
-    UNIQUE (storage_pool_id, name, type),
-    FOREIGN KEY (storage_pool_id) REFERENCES storage_pools (id) ON DELETE CASCADE
-);
-CREATE TABLE storage_volumes_config (
-    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    storage_volume_id INTEGER NOT NULL,
-    key VARCHAR(255) NOT NULL,
-    value TEXT,
-    UNIQUE (storage_volume_id, key),
-    FOREIGN KEY (storage_volume_id) REFERENCES storage_volumes (id) ON DELETE CASCADE
-);
 
 INSERT INTO schema (version, updated_at) VALUES (37, strftime("%s"))
 `
diff --git a/lxd/db/node/update.go b/lxd/db/node/update.go
index 0866bd63d..1e20ca8ba 100644
--- a/lxd/db/node/update.go
+++ b/lxd/db/node/update.go
@@ -120,6 +120,10 @@ CREATE TABLE raft_nodes (
 DELETE FROM config WHERE NOT key='core.https_address';
 DROP TABLE networks_config;
 DROP TABLE networks;
+DROP TABLE storage_volumes_config;
+DROP TABLE storage_volumes;
+DROP TABLE storage_pools_config;
+DROP TABLE storage_pools;
 `
 	_, err := tx.Exec(stmts)
 	return err
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index 0f2036633..99392f90c 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -10,13 +10,13 @@ import (
 )
 
 // Get all storage pools.
-func (n *Node) StoragePools() ([]string, error) {
+func (c *Cluster) StoragePools() ([]string, error) {
 	var name string
 	query := "SELECT name FROM storage_pools"
 	inargs := []interface{}{}
 	outargs := []interface{}{name}
 
-	result, err := queryScan(n.db, query, inargs, outargs)
+	result, err := queryScan(c.db, query, inargs, outargs)
 	if err != nil {
 		return []string{}, err
 	}
@@ -34,13 +34,13 @@ func (n *Node) StoragePools() ([]string, error) {
 }
 
 // Get the distinct storage drivers used by the existing storage pools.
-func (n *Node) StoragePoolsGetDrivers() ([]string, error) {
+func (c *Cluster) StoragePoolsGetDrivers() ([]string, error) {
 	var poolDriver string
 	query := "SELECT DISTINCT driver FROM storage_pools"
 	inargs := []interface{}{}
 	outargs := []interface{}{poolDriver}
 
-	result, err := queryScan(n.db, query, inargs, outargs)
+	result, err := queryScan(c.db, query, inargs, outargs)
 	if err != nil {
 		return []string{}, err
 	}
@@ -58,13 +58,13 @@ func (n *Node) StoragePoolsGetDrivers() ([]string, error) {
 }
 
 // Get id of a single storage pool.
-func (n *Node) StoragePoolGetID(poolName string) (int64, error) {
+func (c *Cluster) StoragePoolGetID(poolName string) (int64, error) {
 	poolID := int64(-1)
 	query := "SELECT id FROM storage_pools WHERE name=?"
 	inargs := []interface{}{poolName}
 	outargs := []interface{}{&poolID}
 
-	err := dbQueryRowScan(n.db, query, inargs, outargs)
+	err := dbQueryRowScan(c.db, query, inargs, outargs)
 	if err != nil {
 		if err == sql.ErrNoRows {
 			return -1, NoSuchObjectError
@@ -75,7 +75,7 @@ func (n *Node) StoragePoolGetID(poolName string) (int64, error) {
 }
 
 // Get a single storage pool.
-func (n *Node) StoragePoolGet(poolName string) (int64, *api.StoragePool, error) {
+func (c *Cluster) StoragePoolGet(poolName string) (int64, *api.StoragePool, error) {
 	var poolDriver string
 	poolID := int64(-1)
 	description := sql.NullString{}
@@ -84,7 +84,7 @@ func (n *Node) StoragePoolGet(poolName string) (int64, *api.StoragePool, error)
 	inargs := []interface{}{poolName}
 	outargs := []interface{}{&poolID, &poolDriver, &description}
 
-	err := dbQueryRowScan(n.db, query, inargs, outargs)
+	err := dbQueryRowScan(c.db, query, inargs, outargs)
 	if err != nil {
 		if err == sql.ErrNoRows {
 			return -1, nil, NoSuchObjectError
@@ -92,7 +92,7 @@ func (n *Node) StoragePoolGet(poolName string) (int64, *api.StoragePool, error)
 		return -1, nil, err
 	}
 
-	config, err := n.StoragePoolConfigGet(poolID)
+	config, err := c.StoragePoolConfigGet(poolID)
 	if err != nil {
 		return -1, nil, err
 	}
@@ -108,13 +108,13 @@ func (n *Node) StoragePoolGet(poolName string) (int64, *api.StoragePool, error)
 }
 
 // Get config of a storage pool.
-func (n *Node) StoragePoolConfigGet(poolID int64) (map[string]string, error) {
+func (c *Cluster) StoragePoolConfigGet(poolID int64) (map[string]string, error) {
 	var key, value string
-	query := "SELECT key, value FROM storage_pools_config WHERE storage_pool_id=?"
-	inargs := []interface{}{poolID}
+	query := "SELECT key, value FROM storage_pools_config WHERE storage_pool_id=? AND (node_id=? OR node_id IS NULL)"
+	inargs := []interface{}{poolID, c.id}
 	outargs := []interface{}{key, value}
 
-	results, err := queryScan(n.db, query, inargs, outargs)
+	results, err := queryScan(c.db, query, inargs, outargs)
 	if err != nil {
 		return nil, err
 	}
@@ -132,8 +132,8 @@ func (n *Node) StoragePoolConfigGet(poolID int64) (map[string]string, error) {
 }
 
 // Create new storage pool.
-func (n *Node) StoragePoolCreate(poolName string, poolDescription string, poolDriver string, poolConfig map[string]string) (int64, error) {
-	tx, err := begin(n.db)
+func (c *Cluster) StoragePoolCreate(poolName string, poolDescription string, poolDriver string, poolConfig map[string]string) (int64, error) {
+	tx, err := begin(c.db)
 	if err != nil {
 		return -1, err
 	}
@@ -150,7 +150,7 @@ func (n *Node) StoragePoolCreate(poolName string, poolDescription string, poolDr
 		return -1, err
 	}
 
-	err = StoragePoolConfigAdd(tx, id, poolConfig)
+	err = StoragePoolConfigAdd(tx, id, c.id, poolConfig)
 	if err != nil {
 		tx.Rollback()
 		return -1, err
@@ -165,8 +165,8 @@ func (n *Node) StoragePoolCreate(poolName string, poolDescription string, poolDr
 }
 
 // Add new storage pool config.
-func StoragePoolConfigAdd(tx *sql.Tx, poolID int64, poolConfig map[string]string) error {
-	str := "INSERT INTO storage_pools_config (storage_pool_id, key, value) VALUES(?, ?, ?)"
+func StoragePoolConfigAdd(tx *sql.Tx, poolID, nodeID int64, poolConfig map[string]string) error {
+	str := "INSERT INTO storage_pools_config (storage_pool_id, node_id, key, value) VALUES(?, ?, ?, ?)"
 	stmt, err := tx.Prepare(str)
 	defer stmt.Close()
 	if err != nil {
@@ -177,8 +177,14 @@ func StoragePoolConfigAdd(tx *sql.Tx, poolID int64, poolConfig map[string]string
 		if v == "" {
 			continue
 		}
+		var nodeIDValue interface{}
+		if k == "source" {
+			nodeIDValue = nodeID
+		} else {
+			nodeIDValue = nil
+		}
 
-		_, err = stmt.Exec(poolID, k, v)
+		_, err = stmt.Exec(poolID, nodeIDValue, k, v)
 		if err != nil {
 			return err
 		}
@@ -188,13 +194,13 @@ func StoragePoolConfigAdd(tx *sql.Tx, poolID int64, poolConfig map[string]string
 }
 
 // Update storage pool.
-func (n *Node) StoragePoolUpdate(poolName, description string, poolConfig map[string]string) error {
-	poolID, _, err := n.StoragePoolGet(poolName)
+func (c *Cluster) StoragePoolUpdate(poolName, description string, poolConfig map[string]string) error {
+	poolID, _, err := c.StoragePoolGet(poolName)
 	if err != nil {
 		return err
 	}
 
-	tx, err := begin(n.db)
+	tx, err := begin(c.db)
 	if err != nil {
 		return err
 	}
@@ -205,13 +211,13 @@ func (n *Node) StoragePoolUpdate(poolName, description string, poolConfig map[st
 		return err
 	}
 
-	err = StoragePoolConfigClear(tx, poolID)
+	err = StoragePoolConfigClear(tx, poolID, c.id)
 	if err != nil {
 		tx.Rollback()
 		return err
 	}
 
-	err = StoragePoolConfigAdd(tx, poolID, poolConfig)
+	err = StoragePoolConfigAdd(tx, poolID, c.id, poolConfig)
 	if err != nil {
 		tx.Rollback()
 		return err
@@ -227,8 +233,8 @@ func StoragePoolUpdateDescription(tx *sql.Tx, id int64, description string) erro
 }
 
 // Delete storage pool config.
-func StoragePoolConfigClear(tx *sql.Tx, poolID int64) error {
-	_, err := tx.Exec("DELETE FROM storage_pools_config WHERE storage_pool_id=?", poolID)
+func StoragePoolConfigClear(tx *sql.Tx, poolID, nodeID int64) error {
+	_, err := tx.Exec("DELETE FROM storage_pools_config WHERE storage_pool_id=? AND (node_id=? OR node_id IS NULL)", poolID, nodeID)
 	if err != nil {
 		return err
 	}
@@ -237,13 +243,13 @@ func StoragePoolConfigClear(tx *sql.Tx, poolID int64) error {
 }
 
 // Delete storage pool.
-func (n *Node) StoragePoolDelete(poolName string) (*api.StoragePool, error) {
-	poolID, pool, err := n.StoragePoolGet(poolName)
+func (c *Cluster) StoragePoolDelete(poolName string) (*api.StoragePool, error) {
+	poolID, pool, err := c.StoragePoolGet(poolName)
 	if err != nil {
 		return nil, err
 	}
 
-	_, err = exec(n.db, "DELETE FROM storage_pools WHERE id=?", poolID)
+	_, err = exec(c.db, "DELETE FROM storage_pools WHERE id=?", poolID)
 	if err != nil {
 		return nil, err
 	}
@@ -252,13 +258,13 @@ func (n *Node) StoragePoolDelete(poolName string) (*api.StoragePool, error) {
 }
 
 // Return the number of storage volumes attached to a given storage pool.
-func (n *Node) StoragePoolVolumesGetNames(poolID int64) (int, error) {
+func (c *Cluster) StoragePoolVolumesGetNames(poolID int64) (int, error) {
 	var volumeName string
 	query := "SELECT name FROM storage_volumes WHERE storage_pool_id=?"
 	inargs := []interface{}{poolID}
 	outargs := []interface{}{volumeName}
 
-	result, err := queryScan(n.db, query, inargs, outargs)
+	result, err := queryScan(c.db, query, inargs, outargs)
 	if err != nil {
 		return -1, err
 	}
@@ -271,17 +277,17 @@ func (n *Node) StoragePoolVolumesGetNames(poolID int64) (int, error) {
 }
 
 // Get all storage volumes attached to a given storage pool.
-func (n *Node) StoragePoolVolumesGet(poolID int64, volumeTypes []int) ([]*api.StorageVolume, error) {
+func (c *Cluster) StoragePoolVolumesGet(poolID int64, volumeTypes []int) ([]*api.StorageVolume, error) {
 	// Get all storage volumes of all types attached to a given storage
 	// pool.
 	result := []*api.StorageVolume{}
 	for _, volumeType := range volumeTypes {
-		volumeNames, err := n.StoragePoolVolumesGetType(volumeType, poolID)
+		volumeNames, err := c.StoragePoolVolumesGetType(volumeType, poolID)
 		if err != nil && err != sql.ErrNoRows {
 			return nil, err
 		}
 		for _, volumeName := range volumeNames {
-			_, volume, err := n.StoragePoolVolumeGetType(volumeName, volumeType, poolID)
+			_, volume, err := c.StoragePoolVolumeGetType(volumeName, volumeType, poolID)
 			if err != nil {
 				return nil, err
 			}
@@ -298,13 +304,13 @@ func (n *Node) StoragePoolVolumesGet(poolID int64, volumeTypes []int) ([]*api.St
 
 // Get all storage volumes attached to a given storage pool of a given volume
 // type.
-func (n *Node) StoragePoolVolumesGetType(volumeType int, poolID int64) ([]string, error) {
+func (c *Cluster) StoragePoolVolumesGetType(volumeType int, poolID int64) ([]string, error) {
 	var poolName string
 	query := "SELECT name FROM storage_volumes WHERE storage_pool_id=? AND type=?"
 	inargs := []interface{}{poolID, volumeType}
 	outargs := []interface{}{poolName}
 
-	result, err := queryScan(n.db, query, inargs, outargs)
+	result, err := queryScan(c.db, query, inargs, outargs)
 	if err != nil {
 		return []string{}, err
 	}
@@ -318,18 +324,18 @@ func (n *Node) StoragePoolVolumesGetType(volumeType int, poolID int64) ([]string
 }
 
 // Get a single storage volume attached to a given storage pool of a given type.
-func (n *Node) StoragePoolVolumeGetType(volumeName string, volumeType int, poolID int64) (int64, *api.StorageVolume, error) {
-	volumeID, err := n.StoragePoolVolumeGetTypeID(volumeName, volumeType, poolID)
+func (c *Cluster) StoragePoolVolumeGetType(volumeName string, volumeType int, poolID int64) (int64, *api.StorageVolume, error) {
+	volumeID, err := c.StoragePoolVolumeGetTypeID(volumeName, volumeType, poolID)
 	if err != nil {
 		return -1, nil, err
 	}
 
-	volumeConfig, err := n.StorageVolumeConfigGet(volumeID)
+	volumeConfig, err := c.StorageVolumeConfigGet(volumeID)
 	if err != nil {
 		return -1, nil, err
 	}
 
-	volumeDescription, err := n.StorageVolumeDescriptionGet(volumeID)
+	volumeDescription, err := c.StorageVolumeDescriptionGet(volumeID)
 	if err != nil {
 		return -1, nil, err
 	}
@@ -350,24 +356,24 @@ func (n *Node) StoragePoolVolumeGetType(volumeName string, volumeType int, poolI
 }
 
 // Update storage volume attached to a given storage pool.
-func (n *Node) StoragePoolVolumeUpdate(volumeName string, volumeType int, poolID int64, volumeDescription string, volumeConfig map[string]string) error {
-	volumeID, _, err := n.StoragePoolVolumeGetType(volumeName, volumeType, poolID)
+func (c *Cluster) StoragePoolVolumeUpdate(volumeName string, volumeType int, poolID int64, volumeDescription string, volumeConfig map[string]string) error {
+	volumeID, _, err := c.StoragePoolVolumeGetType(volumeName, volumeType, poolID)
 	if err != nil {
 		return err
 	}
 
-	tx, err := begin(n.db)
+	tx, err := begin(c.db)
 	if err != nil {
 		return err
 	}
 
-	err = StorageVolumeConfigClear(tx, volumeID)
+	err = StorageVolumeConfigClear(tx, volumeID, c.id)
 	if err != nil {
 		tx.Rollback()
 		return err
 	}
 
-	err = StorageVolumeConfigAdd(tx, volumeID, volumeConfig)
+	err = StorageVolumeConfigAdd(tx, volumeID, c.id, volumeConfig)
 	if err != nil {
 		tx.Rollback()
 		return err
@@ -383,13 +389,13 @@ func (n *Node) StoragePoolVolumeUpdate(volumeName string, volumeType int, poolID
 }
 
 // Delete storage volume attached to a given storage pool.
-func (n *Node) StoragePoolVolumeDelete(volumeName string, volumeType int, poolID int64) error {
-	volumeID, _, err := n.StoragePoolVolumeGetType(volumeName, volumeType, poolID)
+func (c *Cluster) StoragePoolVolumeDelete(volumeName string, volumeType int, poolID int64) error {
+	volumeID, _, err := c.StoragePoolVolumeGetType(volumeName, volumeType, poolID)
 	if err != nil {
 		return err
 	}
 
-	_, err = exec(n.db, "DELETE FROM storage_volumes WHERE id=?", volumeID)
+	_, err = exec(c.db, "DELETE FROM storage_volumes WHERE id=?", volumeID)
 	if err != nil {
 		return err
 	}
@@ -398,13 +404,13 @@ func (n *Node) StoragePoolVolumeDelete(volumeName string, volumeType int, poolID
 }
 
 // Rename storage volume attached to a given storage pool.
-func (n *Node) StoragePoolVolumeRename(oldVolumeName string, newVolumeName string, volumeType int, poolID int64) error {
-	volumeID, _, err := n.StoragePoolVolumeGetType(oldVolumeName, volumeType, poolID)
+func (c *Cluster) StoragePoolVolumeRename(oldVolumeName string, newVolumeName string, volumeType int, poolID int64) error {
+	volumeID, _, err := c.StoragePoolVolumeGetType(oldVolumeName, volumeType, poolID)
 	if err != nil {
 		return err
 	}
 
-	tx, err := begin(n.db)
+	tx, err := begin(c.db)
 	if err != nil {
 		return err
 	}
@@ -419,8 +425,8 @@ func (n *Node) StoragePoolVolumeRename(oldVolumeName string, newVolumeName strin
 }
 
 // Create new storage volume attached to a given storage pool.
-func (n *Node) StoragePoolVolumeCreate(volumeName, volumeDescription string, volumeType int, poolID int64, volumeConfig map[string]string) (int64, error) {
-	tx, err := begin(n.db)
+func (c *Cluster) StoragePoolVolumeCreate(volumeName, volumeDescription string, volumeType int, poolID int64, volumeConfig map[string]string) (int64, error) {
+	tx, err := begin(c.db)
 	if err != nil {
 		return -1, err
 	}
@@ -438,7 +444,7 @@ func (n *Node) StoragePoolVolumeCreate(volumeName, volumeDescription string, vol
 		return -1, err
 	}
 
-	err = StorageVolumeConfigAdd(tx, volumeID, volumeConfig)
+	err = StorageVolumeConfigAdd(tx, volumeID, c.id, volumeConfig)
 	if err != nil {
 		tx.Rollback()
 		return -1, err
@@ -454,7 +460,7 @@ func (n *Node) StoragePoolVolumeCreate(volumeName, volumeDescription string, vol
 
 // Get ID of a storage volume on a given storage pool of a given storage volume
 // type.
-func (n *Node) StoragePoolVolumeGetTypeID(volumeName string, volumeType int, poolID int64) (int64, error) {
+func (c *Cluster) StoragePoolVolumeGetTypeID(volumeName string, volumeType int, poolID int64) (int64, error) {
 	volumeID := int64(-1)
 	query := `SELECT storage_volumes.id
 FROM storage_volumes
@@ -465,7 +471,7 @@ AND storage_volumes.name=? AND storage_volumes.type=?`
 	inargs := []interface{}{poolID, volumeName, volumeType}
 	outargs := []interface{}{&volumeID}
 
-	err := dbQueryRowScan(n.db, query, inargs, outargs)
+	err := dbQueryRowScan(c.db, query, inargs, outargs)
 	if err != nil {
 		return -1, NoSuchObjectError
 	}
@@ -505,7 +511,7 @@ func StoragePoolVolumeTypeToName(volumeType int) (string, error) {
 	return "", fmt.Errorf("invalid storage volume type")
 }
 
-func (n *Node) StoragePoolInsertZfsDriver() error {
-	_, err := exec(n.db, "UPDATE storage_pools SET driver='zfs', description='' WHERE driver=''")
+func (c *Cluster) StoragePoolInsertZfsDriver() error {
+	_, err := exec(c.db, "UPDATE storage_pools SET driver='zfs', description='' WHERE driver=''")
 	return err
 }
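
The node_id handling is the substantive change in this file: reads filter on node_id=? OR node_id IS NULL, and writes only attach a node ID to the "source" key, so "source" stays node-specific while every other key is cluster-wide. A self-contained sketch of that resolution logic (the column layout below is illustrative, not the exact cluster schema introduced elsewhere in this series):

    package main

    import (
        "database/sql"
        "fmt"
        "log"

        _ "github.com/mattn/go-sqlite3"
    )

    func main() {
        db, err := sql.Open("sqlite3", ":memory:")
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()
        db.SetMaxOpenConns(1) // keep the single in-memory database alive

        stmts := []string{
            // Illustrative layout: a NULL node_id marks a cluster-wide key.
            "CREATE TABLE storage_pools_config (storage_pool_id INTEGER, node_id INTEGER, key TEXT, value TEXT)",
            "INSERT INTO storage_pools_config VALUES (1, NULL, 'size', '10GB')",
            // 'source' rows are written with the local node's ID.
            "INSERT INTO storage_pools_config VALUES (1, 1, 'source', '/srv/node1')",
            "INSERT INTO storage_pools_config VALUES (1, 2, 'source', '/srv/node2')",
        }
        for _, stmt := range stmts {
            if _, err := db.Exec(stmt); err != nil {
                log.Fatal(err)
            }
        }

        // Node 2 sees its own 'source' plus all cluster-wide keys.
        rows, err := db.Query("SELECT key, value FROM storage_pools_config WHERE storage_pool_id=? AND (node_id=? OR node_id IS NULL)", 1, 2)
        if err != nil {
            log.Fatal(err)
        }
        defer rows.Close()
        for rows.Next() {
            var k, v string
            if err := rows.Scan(&k, &v); err != nil {
                log.Fatal(err)
            }
            fmt.Printf("%s=%s\n", k, v) // prints size=10GB, then source=/srv/node2
        }
    }
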
diff --git a/lxd/db/storage_volumes.go b/lxd/db/storage_volumes.go
index 49da9d45a..95d164234 100644
--- a/lxd/db/storage_volumes.go
+++ b/lxd/db/storage_volumes.go
@@ -2,18 +2,20 @@ package db
 
 import (
 	"database/sql"
+	"fmt"
 
+	"github.com/lxc/lxd/lxd/db/query"
 	_ "github.com/mattn/go-sqlite3"
 )
 
 // Get config of a storage volume.
-func (n *Node) StorageVolumeConfigGet(volumeID int64) (map[string]string, error) {
+func (c *Cluster) StorageVolumeConfigGet(volumeID int64) (map[string]string, error) {
 	var key, value string
-	query := "SELECT key, value FROM storage_volumes_config WHERE storage_volume_id=?"
-	inargs := []interface{}{volumeID}
+	query := "SELECT key, value FROM storage_volumes_config WHERE storage_volume_id=? AND node_id=?"
+	inargs := []interface{}{volumeID, c.id}
 	outargs := []interface{}{key, value}
 
-	results, err := queryScan(n.db, query, inargs, outargs)
+	results, err := queryScan(c.db, query, inargs, outargs)
 	if err != nil {
 		return nil, err
 	}
@@ -31,13 +33,13 @@ func (n *Node) StorageVolumeConfigGet(volumeID int64) (map[string]string, error)
 }
 
 // Get the description of a storage volume.
-func (n *Node) StorageVolumeDescriptionGet(volumeID int64) (string, error) {
+func (c *Cluster) StorageVolumeDescriptionGet(volumeID int64) (string, error) {
 	description := sql.NullString{}
 	query := "SELECT description FROM storage_volumes WHERE id=?"
 	inargs := []interface{}{volumeID}
 	outargs := []interface{}{&description}
 
-	err := dbQueryRowScan(n.db, query, inargs, outargs)
+	err := dbQueryRowScan(c.db, query, inargs, outargs)
 	if err != nil {
 		if err == sql.ErrNoRows {
 			return "", NoSuchObjectError
@@ -54,8 +56,8 @@ func StorageVolumeDescriptionUpdate(tx *sql.Tx, volumeID int64, description stri
 }
 
 // Add new storage volume config into database.
-func StorageVolumeConfigAdd(tx *sql.Tx, volumeID int64, volumeConfig map[string]string) error {
-	str := "INSERT INTO storage_volumes_config (storage_volume_id, key, value) VALUES(?, ?, ?)"
+func StorageVolumeConfigAdd(tx *sql.Tx, volumeID, nodeID int64, volumeConfig map[string]string) error {
+	str := "INSERT INTO storage_volumes_config (storage_volume_id, node_id, key, value) VALUES(?, ?, ?, ?)"
 	stmt, err := tx.Prepare(str)
 	defer stmt.Close()
 	if err != nil {
@@ -67,7 +69,7 @@ func StorageVolumeConfigAdd(tx *sql.Tx, volumeID int64, volumeConfig map[string]
 			continue
 		}
 
-		_, err = stmt.Exec(volumeID, k, v)
+		_, err = stmt.Exec(volumeID, nodeID, k, v)
 		if err != nil {
 			return err
 		}
@@ -77,8 +79,8 @@ func StorageVolumeConfigAdd(tx *sql.Tx, volumeID int64, volumeConfig map[string]
 }
 
 // Delete storage volume config.
-func StorageVolumeConfigClear(tx *sql.Tx, volumeID int64) error {
-	_, err := tx.Exec("DELETE FROM storage_volumes_config WHERE storage_volume_id=?", volumeID)
+func StorageVolumeConfigClear(tx *sql.Tx, volumeID, nodeID int64) error {
+	_, err := tx.Exec("DELETE FROM storage_volumes_config WHERE storage_volume_id=? AND node_id=?", volumeID, nodeID)
 	if err != nil {
 		return err
 	}
@@ -86,18 +88,25 @@ func StorageVolumeConfigClear(tx *sql.Tx, volumeID int64) error {
 	return nil
 }
 
-func (n *Node) StorageVolumeCleanupImages() error {
-	_, err := exec(n.db, "DELETE FROM storage_volumes WHERE type=? AND name NOT IN (SELECT fingerprint FROM images);", StoragePoolVolumeTypeImage)
+func (c *Cluster) StorageVolumeCleanupImages(fingerprints []string) error {
+	stmt := fmt.Sprintf(
+		"DELETE FROM storage_volumes WHERE type=? AND name NOT IN %s",
+		query.Params(len(fingerprints)))
+	args := []interface{}{StoragePoolVolumeTypeImage}
+	for _, fingerprint := range fingerprints {
+		args = append(args, fingerprint)
+	}
+	_, err := exec(c.db, stmt, args...)
 	return err
 }
 
-func (n *Node) StorageVolumeMoveToLVMThinPoolNameKey() error {
-	_, err := exec(n.db, "UPDATE storage_pools_config SET key='lvm.thinpool_name' WHERE key='volume.lvm.thinpool_name';")
+func (c *Cluster) StorageVolumeMoveToLVMThinPoolNameKey() error {
+	_, err := exec(c.db, "UPDATE storage_pools_config SET key='lvm.thinpool_name' WHERE key='volume.lvm.thinpool_name';")
 	if err != nil {
 		return err
 	}
 
-	_, err = exec(n.db, "DELETE FROM storage_volumes_config WHERE key='lvm.thinpool_name';")
+	_, err = exec(c.db, "DELETE FROM storage_volumes_config WHERE key='lvm.thinpool_name';")
 	if err != nil {
 		return err
 	}
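
One remark on StorageVolumeCleanupImages: the NOT IN clause is now rendered from the fingerprint list passed in by the caller, so the statement grows with the number of images. Sketching the expansion with the same stand-in params helper as earlier; note that an empty fingerprint slice would presumably render an empty tuple, which SQLite rejects as a syntax error, so callers may want to guard that case:

    package main

    import (
        "fmt"
        "strings"
    )

    func params(n int) string { // stand-in for query.Params
        return "(" + strings.TrimSuffix(strings.Repeat("?, ", n), ", ") + ")"
    }

    func main() {
        fingerprints := []string{"abc123", "def456"}
        stmt := fmt.Sprintf(
            "DELETE FROM storage_volumes WHERE type=? AND name NOT IN %s",
            params(len(fingerprints)))
        fmt.Println(stmt)
        // DELETE FROM storage_volumes WHERE type=? AND name NOT IN (?, ?)
        fmt.Println(params(0)) // "()" -- invalid SQL, hence the guard caveat
    }
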
diff --git a/lxd/images.go b/lxd/images.go
index f3d9259b0..f14084fb7 100644
--- a/lxd/images.go
+++ b/lxd/images.go
@@ -827,7 +827,7 @@ func autoUpdateImage(d *Daemon, op *operation, id int, info *api.Image) error {
 
 	// Get the IDs of all storage pools on which a storage volume
 	// for the requested image currently exists.
-	poolIDs, err := d.db.ImageGetPools(fingerprint)
+	poolIDs, err := d.cluster.ImageGetPools(fingerprint)
 	if err != nil {
 		logger.Error("Error getting image pools", log.Ctx{"err": err, "fp": fingerprint})
 		return err
@@ -1009,7 +1009,7 @@ func pruneExpiredImages(ctx context.Context, d *Daemon) {
 
 		// Get the IDs of all storage pools on which a storage volume
 		// for the requested image currently exists.
-		poolIDs, err := d.db.ImageGetPools(fp)
+		poolIDs, err := d.cluster.ImageGetPools(fp)
 		if err != nil {
 			continue
 		}
@@ -1087,7 +1087,7 @@ func imageDelete(d *Daemon, r *http.Request) Response {
 			return err
 		}
 
-		poolIDs, err := d.db.ImageGetPools(imgInfo.Fingerprint)
+		poolIDs, err := d.cluster.ImageGetPools(imgInfo.Fingerprint)
 		if err != nil {
 			return err
 		}
diff --git a/lxd/main_test.go b/lxd/main_test.go
index 2c1acfd54..4bed14446 100644
--- a/lxd/main_test.go
+++ b/lxd/main_test.go
@@ -70,7 +70,7 @@ func (suite *lxdTestSuite) SetupTest() {
 	mockStorage, _ := storageTypeToString(storageTypeMock)
 	// Create the database entry for the storage pool.
 	poolDescription := fmt.Sprintf("%s storage pool", lxdTestSuiteDefaultStoragePool)
-	_, err = dbStoragePoolCreateAndUpdateCache(suite.d.db, lxdTestSuiteDefaultStoragePool, poolDescription, mockStorage, poolConfig)
+	_, err = dbStoragePoolCreateAndUpdateCache(suite.d.cluster, lxdTestSuiteDefaultStoragePool, poolDescription, mockStorage, poolConfig)
 	if err != nil {
 		suite.T().Fatalf("failed to create default storage pool: %v", err)
 	}
diff --git a/lxd/patches.go b/lxd/patches.go
index 37eeca1f8..6373c85a5 100644
--- a/lxd/patches.go
+++ b/lxd/patches.go
@@ -319,7 +319,7 @@ func upgradeFromStorageTypeBtrfs(name string, d *Daemon, defaultPoolName string,
 	}
 
 	poolID := int64(-1)
-	pools, err := d.db.StoragePools()
+	pools, err := d.cluster.StoragePools()
 	if err == nil { // Valid storage pools already exist.
 		// Check if the storage pool already has a db entry.
 		if shared.StringInSlice(defaultPoolName, pools) {
@@ -328,7 +328,7 @@ func upgradeFromStorageTypeBtrfs(name string, d *Daemon, defaultPoolName string,
 
 		// Get the pool ID as we need it for storage volume creation.
 		// (Use a tmp variable as Go's scoping is freaking me out.)
-		tmp, pool, err := d.db.StoragePoolGet(defaultPoolName)
+		tmp, pool, err := d.cluster.StoragePoolGet(defaultPoolName)
 		if err != nil {
 			logger.Errorf("Failed to query database: %s.", err)
 			return err
@@ -341,12 +341,12 @@ func upgradeFromStorageTypeBtrfs(name string, d *Daemon, defaultPoolName string,
 		if pool.Config == nil {
 			pool.Config = poolConfig
 		}
-		err = d.db.StoragePoolUpdate(defaultPoolName, "", pool.Config)
+		err = d.cluster.StoragePoolUpdate(defaultPoolName, "", pool.Config)
 		if err != nil {
 			return err
 		}
 	} else if err == db.NoSuchObjectError { // Likely a pristine upgrade.
-		tmp, err := dbStoragePoolCreateAndUpdateCache(d.db, defaultPoolName, "", defaultStorageTypeName, poolConfig)
+		tmp, err := dbStoragePoolCreateAndUpdateCache(d.cluster, defaultPoolName, "", defaultStorageTypeName, poolConfig)
 		if err != nil {
 			return err
 		}
@@ -378,7 +378,7 @@ func upgradeFromStorageTypeBtrfs(name string, d *Daemon, defaultPoolName string,
 	}
 
 	// Get storage pool from the db after having updated it above.
-	_, defaultPool, err := d.db.StoragePoolGet(defaultPoolName)
+	_, defaultPool, err := d.cluster.StoragePoolGet(defaultPoolName)
 	if err != nil {
 		return err
 	}
@@ -392,16 +392,16 @@ func upgradeFromStorageTypeBtrfs(name string, d *Daemon, defaultPoolName string,
 			return err
 		}
 
-		_, err = d.db.StoragePoolVolumeGetTypeID(ct, storagePoolVolumeTypeContainer, poolID)
+		_, err = d.cluster.StoragePoolVolumeGetTypeID(ct, storagePoolVolumeTypeContainer, poolID)
 		if err == nil {
 			logger.Warnf("Storage volumes database already contains an entry for the container.")
-			err := d.db.StoragePoolVolumeUpdate(ct, storagePoolVolumeTypeContainer, poolID, "", containerPoolVolumeConfig)
+			err := d.cluster.StoragePoolVolumeUpdate(ct, storagePoolVolumeTypeContainer, poolID, "", containerPoolVolumeConfig)
 			if err != nil {
 				return err
 			}
 		} else if err == db.NoSuchObjectError {
 			// Insert storage volumes for containers into the database.
-			_, err := d.db.StoragePoolVolumeCreate(ct, "", storagePoolVolumeTypeContainer, poolID, containerPoolVolumeConfig)
+			_, err := d.cluster.StoragePoolVolumeCreate(ct, "", storagePoolVolumeTypeContainer, poolID, containerPoolVolumeConfig)
 			if err != nil {
 				logger.Errorf("Could not insert a storage volume for container \"%s\".", ct)
 				return err
@@ -480,16 +480,16 @@ func upgradeFromStorageTypeBtrfs(name string, d *Daemon, defaultPoolName string,
 				return err
 			}
 
-			_, err = d.db.StoragePoolVolumeGetTypeID(cs, storagePoolVolumeTypeContainer, poolID)
+			_, err = d.cluster.StoragePoolVolumeGetTypeID(cs, storagePoolVolumeTypeContainer, poolID)
 			if err == nil {
 				logger.Warnf("Storage volumes database already contains an entry for the snapshot.")
-				err := d.db.StoragePoolVolumeUpdate(cs, storagePoolVolumeTypeContainer, poolID, "", snapshotPoolVolumeConfig)
+				err := d.cluster.StoragePoolVolumeUpdate(cs, storagePoolVolumeTypeContainer, poolID, "", snapshotPoolVolumeConfig)
 				if err != nil {
 					return err
 				}
 			} else if err == db.NoSuchObjectError {
 				// Insert storage volumes for snapshots into the database.
-				_, err := d.db.StoragePoolVolumeCreate(cs, "", storagePoolVolumeTypeContainer, poolID, snapshotPoolVolumeConfig)
+				_, err := d.cluster.StoragePoolVolumeCreate(cs, "", storagePoolVolumeTypeContainer, poolID, snapshotPoolVolumeConfig)
 				if err != nil {
 					logger.Errorf("Could not insert a storage volume for snapshot \"%s\".", cs)
 					return err
@@ -561,16 +561,16 @@ func upgradeFromStorageTypeBtrfs(name string, d *Daemon, defaultPoolName string,
 			return err
 		}
 
-		_, err = d.db.StoragePoolVolumeGetTypeID(img, storagePoolVolumeTypeImage, poolID)
+		_, err = d.cluster.StoragePoolVolumeGetTypeID(img, storagePoolVolumeTypeImage, poolID)
 		if err == nil {
 			logger.Warnf("Storage volumes database already contains an entry for the image.")
-			err := d.db.StoragePoolVolumeUpdate(img, storagePoolVolumeTypeImage, poolID, "", imagePoolVolumeConfig)
+			err := d.cluster.StoragePoolVolumeUpdate(img, storagePoolVolumeTypeImage, poolID, "", imagePoolVolumeConfig)
 			if err != nil {
 				return err
 			}
 		} else if err == db.NoSuchObjectError {
 			// Insert storage volumes for images into the database.
-			_, err := d.db.StoragePoolVolumeCreate(img, "", storagePoolVolumeTypeImage, poolID, imagePoolVolumeConfig)
+			_, err := d.cluster.StoragePoolVolumeCreate(img, "", storagePoolVolumeTypeImage, poolID, imagePoolVolumeConfig)
 			if err != nil {
 				logger.Errorf("Could not insert a storage volume for image \"%s\".", img)
 				return err
@@ -616,7 +616,7 @@ func upgradeFromStorageTypeDir(name string, d *Daemon, defaultPoolName string, d
 	}
 
 	poolID := int64(-1)
-	pools, err := d.db.StoragePools()
+	pools, err := d.cluster.StoragePools()
 	if err == nil { // Valid storage pools already exist.
 		// Check if the storage pool already has a db entry.
 		if shared.StringInSlice(defaultPoolName, pools) {
@@ -625,7 +625,7 @@ func upgradeFromStorageTypeDir(name string, d *Daemon, defaultPoolName string, d
 
 		// Get the pool ID as we need it for storage volume creation.
 		// (Use a tmp variable as Go's scoping is freaking me out.)
-		tmp, pool, err := d.db.StoragePoolGet(defaultPoolName)
+		tmp, pool, err := d.cluster.StoragePoolGet(defaultPoolName)
 		if err != nil {
 			logger.Errorf("Failed to query database: %s.", err)
 			return err
@@ -638,12 +638,12 @@ func upgradeFromStorageTypeDir(name string, d *Daemon, defaultPoolName string, d
 		if pool.Config == nil {
 			pool.Config = poolConfig
 		}
-		err = d.db.StoragePoolUpdate(defaultPoolName, pool.Description, pool.Config)
+		err = d.cluster.StoragePoolUpdate(defaultPoolName, pool.Description, pool.Config)
 		if err != nil {
 			return err
 		}
 	} else if err == db.NoSuchObjectError { // Likely a pristine upgrade.
-		tmp, err := dbStoragePoolCreateAndUpdateCache(d.db, defaultPoolName, "", defaultStorageTypeName, poolConfig)
+		tmp, err := dbStoragePoolCreateAndUpdateCache(d.cluster, defaultPoolName, "", defaultStorageTypeName, poolConfig)
 		if err != nil {
 			return err
 		}
@@ -664,7 +664,7 @@ func upgradeFromStorageTypeDir(name string, d *Daemon, defaultPoolName string, d
 	}
 
 	// Get storage pool from the db after having updated it above.
-	_, defaultPool, err := d.db.StoragePoolGet(defaultPoolName)
+	_, defaultPool, err := d.cluster.StoragePoolGet(defaultPoolName)
 	if err != nil {
 		return err
 	}
@@ -679,16 +679,16 @@ func upgradeFromStorageTypeDir(name string, d *Daemon, defaultPoolName string, d
 			return err
 		}
 
-		_, err = d.db.StoragePoolVolumeGetTypeID(ct, storagePoolVolumeTypeContainer, poolID)
+		_, err = d.cluster.StoragePoolVolumeGetTypeID(ct, storagePoolVolumeTypeContainer, poolID)
 		if err == nil {
 			logger.Warnf("Storage volumes database already contains an entry for the container.")
-			err := d.db.StoragePoolVolumeUpdate(ct, storagePoolVolumeTypeContainer, poolID, "", containerPoolVolumeConfig)
+			err := d.cluster.StoragePoolVolumeUpdate(ct, storagePoolVolumeTypeContainer, poolID, "", containerPoolVolumeConfig)
 			if err != nil {
 				return err
 			}
 		} else if err == db.NoSuchObjectError {
 			// Insert storage volumes for containers into the database.
-			_, err := d.db.StoragePoolVolumeCreate(ct, "", storagePoolVolumeTypeContainer, poolID, containerPoolVolumeConfig)
+			_, err := d.cluster.StoragePoolVolumeCreate(ct, "", storagePoolVolumeTypeContainer, poolID, containerPoolVolumeConfig)
 			if err != nil {
 				logger.Errorf("Could not insert a storage volume for container \"%s\".", ct)
 				return err
@@ -796,16 +796,16 @@ func upgradeFromStorageTypeDir(name string, d *Daemon, defaultPoolName string, d
 			return err
 		}
 
-		_, err = d.db.StoragePoolVolumeGetTypeID(cs, storagePoolVolumeTypeContainer, poolID)
+		_, err = d.cluster.StoragePoolVolumeGetTypeID(cs, storagePoolVolumeTypeContainer, poolID)
 		if err == nil {
 			logger.Warnf("Storage volumes database already contains an entry for the snapshot.")
-			err := d.db.StoragePoolVolumeUpdate(cs, storagePoolVolumeTypeContainer, poolID, "", snapshotPoolVolumeConfig)
+			err := d.cluster.StoragePoolVolumeUpdate(cs, storagePoolVolumeTypeContainer, poolID, "", snapshotPoolVolumeConfig)
 			if err != nil {
 				return err
 			}
 		} else if err == db.NoSuchObjectError {
 			// Insert storage volumes for snapshots into the database.
-			_, err := d.db.StoragePoolVolumeCreate(cs, "", storagePoolVolumeTypeContainer, poolID, snapshotPoolVolumeConfig)
+			_, err := d.cluster.StoragePoolVolumeCreate(cs, "", storagePoolVolumeTypeContainer, poolID, snapshotPoolVolumeConfig)
 			if err != nil {
 				logger.Errorf("Could not insert a storage volume for snapshot \"%s\".", cs)
 				return err
@@ -826,16 +826,16 @@ func upgradeFromStorageTypeDir(name string, d *Daemon, defaultPoolName string, d
 			return err
 		}
 
-		_, err = d.db.StoragePoolVolumeGetTypeID(img, storagePoolVolumeTypeImage, poolID)
+		_, err = d.cluster.StoragePoolVolumeGetTypeID(img, storagePoolVolumeTypeImage, poolID)
 		if err == nil {
 			logger.Warnf("Storage volumes database already contains an entry for the image.")
-			err := d.db.StoragePoolVolumeUpdate(img, storagePoolVolumeTypeImage, poolID, "", imagePoolVolumeConfig)
+			err := d.cluster.StoragePoolVolumeUpdate(img, storagePoolVolumeTypeImage, poolID, "", imagePoolVolumeConfig)
 			if err != nil {
 				return err
 			}
 		} else if err == db.NoSuchObjectError {
 			// Insert storage volumes for images into the database.
-			_, err := d.db.StoragePoolVolumeCreate(img, "", storagePoolVolumeTypeImage, poolID, imagePoolVolumeConfig)
+			_, err := d.cluster.StoragePoolVolumeCreate(img, "", storagePoolVolumeTypeImage, poolID, imagePoolVolumeConfig)
 			if err != nil {
 				logger.Errorf("Could not insert a storage volume for image \"%s\".", img)
 				return err
@@ -915,7 +915,7 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
 	// are already configured. If so, we can assume that a partial upgrade
 	// has been performed and can skip the next steps.
 	poolID := int64(-1)
-	pools, err := d.db.StoragePools()
+	pools, err := d.cluster.StoragePools()
 	if err == nil { // Valid storage pools already exist.
 		// Check if the storage pool already has a db entry.
 		if shared.StringInSlice(defaultPoolName, pools) {
@@ -924,7 +924,7 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
 
 		// Get the pool ID as we need it for storage volume creation.
 		// (Use a tmp variable as Go's scoping is freaking me out.)
-		tmp, pool, err := d.db.StoragePoolGet(defaultPoolName)
+		tmp, pool, err := d.cluster.StoragePoolGet(defaultPoolName)
 		if err != nil {
 			logger.Errorf("Failed to query database: %s.", err)
 			return err
@@ -937,12 +937,12 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
 		if pool.Config == nil {
 			pool.Config = poolConfig
 		}
-		err = d.db.StoragePoolUpdate(defaultPoolName, pool.Description, pool.Config)
+		err = d.cluster.StoragePoolUpdate(defaultPoolName, pool.Description, pool.Config)
 		if err != nil {
 			return err
 		}
 	} else if err == db.NoSuchObjectError { // Likely a pristine upgrade.
-		tmp, err := dbStoragePoolCreateAndUpdateCache(d.db, defaultPoolName, "", defaultStorageTypeName, poolConfig)
+		tmp, err := dbStoragePoolCreateAndUpdateCache(d.cluster, defaultPoolName, "", defaultStorageTypeName, poolConfig)
 		if err != nil {
 			return err
 		}
@@ -973,7 +973,7 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
 	}
 
 	// Get storage pool from the db after having updated it above.
-	_, defaultPool, err := d.db.StoragePoolGet(defaultPoolName)
+	_, defaultPool, err := d.cluster.StoragePoolGet(defaultPoolName)
 	if err != nil {
 		return err
 	}
@@ -988,16 +988,16 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
 			return err
 		}
 
-		_, err = d.db.StoragePoolVolumeGetTypeID(ct, storagePoolVolumeTypeContainer, poolID)
+		_, err = d.cluster.StoragePoolVolumeGetTypeID(ct, storagePoolVolumeTypeContainer, poolID)
 		if err == nil {
 			logger.Warnf("Storage volumes database already contains an entry for the container.")
-			err := d.db.StoragePoolVolumeUpdate(ct, storagePoolVolumeTypeContainer, poolID, "", containerPoolVolumeConfig)
+			err := d.cluster.StoragePoolVolumeUpdate(ct, storagePoolVolumeTypeContainer, poolID, "", containerPoolVolumeConfig)
 			if err != nil {
 				return err
 			}
 		} else if err == db.NoSuchObjectError {
 			// Insert storage volumes for containers into the database.
-			_, err := d.db.StoragePoolVolumeCreate(ct, "", storagePoolVolumeTypeContainer, poolID, containerPoolVolumeConfig)
+			_, err := d.cluster.StoragePoolVolumeCreate(ct, "", storagePoolVolumeTypeContainer, poolID, containerPoolVolumeConfig)
 			if err != nil {
 				logger.Errorf("Could not insert a storage volume for container \"%s\".", ct)
 				return err
@@ -1143,16 +1143,16 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
 				return err
 			}
 
-			_, err = d.db.StoragePoolVolumeGetTypeID(cs, storagePoolVolumeTypeContainer, poolID)
+			_, err = d.cluster.StoragePoolVolumeGetTypeID(cs, storagePoolVolumeTypeContainer, poolID)
 			if err == nil {
 				logger.Warnf("Storage volumes database already contains an entry for the snapshot.")
-				err := d.db.StoragePoolVolumeUpdate(cs, storagePoolVolumeTypeContainer, poolID, "", snapshotPoolVolumeConfig)
+				err := d.cluster.StoragePoolVolumeUpdate(cs, storagePoolVolumeTypeContainer, poolID, "", snapshotPoolVolumeConfig)
 				if err != nil {
 					return err
 				}
 			} else if err == db.NoSuchObjectError {
 				// Insert storage volumes for snapshots into the database.
-				_, err := d.db.StoragePoolVolumeCreate(cs, "", storagePoolVolumeTypeContainer, poolID, snapshotPoolVolumeConfig)
+				_, err := d.cluster.StoragePoolVolumeCreate(cs, "", storagePoolVolumeTypeContainer, poolID, snapshotPoolVolumeConfig)
 				if err != nil {
 					logger.Errorf("Could not insert a storage volume for snapshot \"%s\".", cs)
 					return err
@@ -1314,16 +1314,16 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
 			return err
 		}
 
-		_, err = d.db.StoragePoolVolumeGetTypeID(img, storagePoolVolumeTypeImage, poolID)
+		_, err = d.cluster.StoragePoolVolumeGetTypeID(img, storagePoolVolumeTypeImage, poolID)
 		if err == nil {
 			logger.Warnf("Storage volumes database already contains an entry for the image.")
-			err := d.db.StoragePoolVolumeUpdate(img, storagePoolVolumeTypeImage, poolID, "", imagePoolVolumeConfig)
+			err := d.cluster.StoragePoolVolumeUpdate(img, storagePoolVolumeTypeImage, poolID, "", imagePoolVolumeConfig)
 			if err != nil {
 				return err
 			}
 		} else if err == db.NoSuchObjectError {
 			// Insert storage volumes for images into the database.
-			_, err := d.db.StoragePoolVolumeCreate(img, "", storagePoolVolumeTypeImage, poolID, imagePoolVolumeConfig)
+			_, err := d.cluster.StoragePoolVolumeCreate(img, "", storagePoolVolumeTypeImage, poolID, imagePoolVolumeConfig)
 			if err != nil {
 				logger.Errorf("Could not insert a storage volume for image \"%s\".", img)
 				return err
@@ -1375,7 +1375,7 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
 			// This image didn't exist as a logical volume on the
 			// old LXD instance so we need to kick it from the
 			// storage volumes database for this pool.
-			err := d.db.StoragePoolVolumeDelete(img, storagePoolVolumeTypeImage, poolID)
+			err := d.cluster.StoragePoolVolumeDelete(img, storagePoolVolumeTypeImage, poolID)
 			if err != nil {
 				return err
 			}
@@ -1415,7 +1415,7 @@ func upgradeFromStorageTypeZfs(name string, d *Daemon, defaultPoolName string, d
 	// are already configured. If so, we can assume that a partial upgrade
 	// has been performed and can skip the next steps.
 	poolID := int64(-1)
-	pools, err := d.db.StoragePools()
+	pools, err := d.cluster.StoragePools()
 	if err == nil { // Valid storage pools already exist.
 		// Check if the storage pool already has a db entry.
 		if shared.StringInSlice(poolName, pools) {
@@ -1424,7 +1424,7 @@ func upgradeFromStorageTypeZfs(name string, d *Daemon, defaultPoolName string, d
 
 		// Get the pool ID as we need it for storage volume creation.
 		// (Use a tmp variable as Go's scoping is freaking me out.)
-		tmp, pool, err := d.db.StoragePoolGet(poolName)
+		tmp, pool, err := d.cluster.StoragePoolGet(poolName)
 		if err != nil {
 			logger.Errorf("Failed to query database: %s.", err)
 			return err
@@ -1437,7 +1437,7 @@ func upgradeFromStorageTypeZfs(name string, d *Daemon, defaultPoolName string, d
 		if pool.Config == nil {
 			pool.Config = poolConfig
 		}
-		err = d.db.StoragePoolUpdate(poolName, pool.Description, pool.Config)
+		err = d.cluster.StoragePoolUpdate(poolName, pool.Description, pool.Config)
 		if err != nil {
 			return err
 		}
@@ -1469,7 +1469,7 @@ func upgradeFromStorageTypeZfs(name string, d *Daemon, defaultPoolName string, d
 		}
 
 		// (Use a tmp variable as Go's scoping is freaking me out.)
-		tmp, err := dbStoragePoolCreateAndUpdateCache(d.db, poolName, "", defaultStorageTypeName, poolConfig)
+		tmp, err := dbStoragePoolCreateAndUpdateCache(d.cluster, poolName, "", defaultStorageTypeName, poolConfig)
 		if err != nil {
 			logger.Warnf("Storage pool already exists in the database. Proceeding...")
 		}
@@ -1480,7 +1480,7 @@ func upgradeFromStorageTypeZfs(name string, d *Daemon, defaultPoolName string, d
 	}
 
 	// Get storage pool from the db after having updated it above.
-	_, defaultPool, err := d.db.StoragePoolGet(poolName)
+	_, defaultPool, err := d.cluster.StoragePoolGet(poolName)
 	if err != nil {
 		return err
 	}
@@ -1505,16 +1505,16 @@ func upgradeFromStorageTypeZfs(name string, d *Daemon, defaultPoolName string, d
 			return err
 		}
 
-		_, err = d.db.StoragePoolVolumeGetTypeID(ct, storagePoolVolumeTypeContainer, poolID)
+		_, err = d.cluster.StoragePoolVolumeGetTypeID(ct, storagePoolVolumeTypeContainer, poolID)
 		if err == nil {
 			logger.Warnf("Storage volumes database already contains an entry for the container.")
-			err := d.db.StoragePoolVolumeUpdate(ct, storagePoolVolumeTypeContainer, poolID, "", containerPoolVolumeConfig)
+			err := d.cluster.StoragePoolVolumeUpdate(ct, storagePoolVolumeTypeContainer, poolID, "", containerPoolVolumeConfig)
 			if err != nil {
 				return err
 			}
 		} else if err == db.NoSuchObjectError {
 			// Insert storage volumes for containers into the database.
-			_, err := d.db.StoragePoolVolumeCreate(ct, "", storagePoolVolumeTypeContainer, poolID, containerPoolVolumeConfig)
+			_, err := d.cluster.StoragePoolVolumeCreate(ct, "", storagePoolVolumeTypeContainer, poolID, containerPoolVolumeConfig)
 			if err != nil {
 				logger.Errorf("Could not insert a storage volume for container \"%s\".", ct)
 				return err
@@ -1591,16 +1591,16 @@ func upgradeFromStorageTypeZfs(name string, d *Daemon, defaultPoolName string, d
 				return err
 			}
 
-			_, err = d.db.StoragePoolVolumeGetTypeID(cs, storagePoolVolumeTypeContainer, poolID)
+			_, err = d.cluster.StoragePoolVolumeGetTypeID(cs, storagePoolVolumeTypeContainer, poolID)
 			if err == nil {
 				logger.Warnf("Storage volumes database already contains an entry for the snapshot.")
-				err := d.db.StoragePoolVolumeUpdate(cs, storagePoolVolumeTypeContainer, poolID, "", snapshotPoolVolumeConfig)
+				err := d.cluster.StoragePoolVolumeUpdate(cs, storagePoolVolumeTypeContainer, poolID, "", snapshotPoolVolumeConfig)
 				if err != nil {
 					return err
 				}
 			} else if err == db.NoSuchObjectError {
 				// Insert storage volumes for snapshots into the database.
-				_, err := d.db.StoragePoolVolumeCreate(cs, "", storagePoolVolumeTypeContainer, poolID, snapshotPoolVolumeConfig)
+				_, err := d.cluster.StoragePoolVolumeCreate(cs, "", storagePoolVolumeTypeContainer, poolID, snapshotPoolVolumeConfig)
 				if err != nil {
 					logger.Errorf("Could not insert a storage volume for snapshot \"%s\".", cs)
 					return err
@@ -1647,16 +1647,16 @@ func upgradeFromStorageTypeZfs(name string, d *Daemon, defaultPoolName string, d
 			return err
 		}
 
-		_, err = d.db.StoragePoolVolumeGetTypeID(img, storagePoolVolumeTypeImage, poolID)
+		_, err = d.cluster.StoragePoolVolumeGetTypeID(img, storagePoolVolumeTypeImage, poolID)
 		if err == nil {
 			logger.Warnf("Storage volumes database already contains an entry for the image.")
-			err := d.db.StoragePoolVolumeUpdate(img, storagePoolVolumeTypeImage, poolID, "", imagePoolVolumeConfig)
+			err := d.cluster.StoragePoolVolumeUpdate(img, storagePoolVolumeTypeImage, poolID, "", imagePoolVolumeConfig)
 			if err != nil {
 				return err
 			}
 		} else if err == db.NoSuchObjectError {
 			// Insert storage volumes for images into the database.
-			_, err := d.db.StoragePoolVolumeCreate(img, "", storagePoolVolumeTypeImage, poolID, imagePoolVolumeConfig)
+			_, err := d.cluster.StoragePoolVolumeCreate(img, "", storagePoolVolumeTypeImage, poolID, imagePoolVolumeConfig)
 			if err != nil {
 				logger.Errorf("Could not insert a storage volume for image \"%s\".", img)
 				return err
@@ -1862,7 +1862,7 @@ func updatePoolPropertyForAllObjects(d *Daemon, poolName string, allcontainers [
 }
 
 func patchStorageApiV1(name string, d *Daemon) error {
-	pools, err := d.db.StoragePools()
+	pools, err := d.cluster.StoragePools()
 	if err != nil && err == db.NoSuchObjectError {
 		// No pool was configured in the previous update. So we're on a
 		// pristine LXD instance.
@@ -1899,15 +1899,19 @@ func patchStorageApiV1(name string, d *Daemon) error {
 }
 
 func patchStorageApiDirCleanup(name string, d *Daemon) error {
-	return d.db.StorageVolumeCleanupImages()
+	fingerprints, err := d.db.ImagesGet(false)
+	if err != nil {
+		return err
+	}
+	return d.cluster.StorageVolumeCleanupImages(fingerprints)
 }
 
 func patchStorageApiLvmKeys(name string, d *Daemon) error {
-	return d.db.StorageVolumeMoveToLVMThinPoolNameKey()
+	return d.cluster.StorageVolumeMoveToLVMThinPoolNameKey()
 }
 
 func patchStorageApiKeys(name string, d *Daemon) error {
-	pools, err := d.db.StoragePools()
+	pools, err := d.cluster.StoragePools()
 	if err != nil && err == db.NoSuchObjectError {
 		// No pool was configured in the previous update. So we're on a
 		// pristine LXD instance.
@@ -1919,7 +1923,7 @@ func patchStorageApiKeys(name string, d *Daemon) error {
 	}
 
 	for _, poolName := range pools {
-		_, pool, err := d.db.StoragePoolGet(poolName)
+		_, pool, err := d.cluster.StoragePoolGet(poolName)
 		if err != nil {
 			logger.Errorf("Failed to query database: %s", err)
 			return err
@@ -1952,7 +1956,7 @@ func patchStorageApiKeys(name string, d *Daemon) error {
 		}
 
 		// Update the config in the database.
-		err = d.db.StoragePoolUpdate(poolName, pool.Description, pool.Config)
+		err = d.cluster.StoragePoolUpdate(poolName, pool.Description, pool.Config)
 		if err != nil {
 			return err
 		}
@@ -1964,7 +1968,7 @@ func patchStorageApiKeys(name string, d *Daemon) error {
 // In case any of the image/container/snapshot objects are missing storage
 // volume configuration entries, let's add the defaults.
 func patchStorageApiUpdateStorageConfigs(name string, d *Daemon) error {
-	pools, err := d.db.StoragePools()
+	pools, err := d.cluster.StoragePools()
 	if err != nil {
 		if err == db.NoSuchObjectError {
 			return nil
@@ -1974,7 +1978,7 @@ func patchStorageApiUpdateStorageConfigs(name string, d *Daemon) error {
 	}
 
 	for _, poolName := range pools {
-		poolID, pool, err := d.db.StoragePoolGet(poolName)
+		poolID, pool, err := d.cluster.StoragePoolGet(poolName)
 		if err != nil {
 			logger.Errorf("Failed to query database: %s", err)
 			return err
@@ -2040,13 +2044,13 @@ func patchStorageApiUpdateStorageConfigs(name string, d *Daemon) error {
 		}
 
 		// Update the storage pool config.
-		err = d.db.StoragePoolUpdate(poolName, pool.Description, pool.Config)
+		err = d.cluster.StoragePoolUpdate(poolName, pool.Description, pool.Config)
 		if err != nil {
 			return err
 		}
 
 		// Get all storage volumes on the storage pool.
-		volumes, err := d.db.StoragePoolVolumesGet(poolID, supportedVolumeTypes)
+		volumes, err := d.cluster.StoragePoolVolumesGet(poolID, supportedVolumeTypes)
 		if err != nil {
 			if err == db.NoSuchObjectError {
 				continue
@@ -2101,7 +2105,7 @@ func patchStorageApiUpdateStorageConfigs(name string, d *Daemon) error {
 			// exist in the db, so it's safe to ignore the error.
 			volumeType, _ := storagePoolVolumeTypeNameToType(volume.Type)
 			// Update the volume config.
-			err = d.db.StoragePoolVolumeUpdate(volume.Name, volumeType, poolID, volume.Description, volume.Config)
+			err = d.cluster.StoragePoolVolumeUpdate(volume.Name, volumeType, poolID, volume.Description, volume.Config)
 			if err != nil {
 				return err
 			}
@@ -2112,7 +2116,7 @@ func patchStorageApiUpdateStorageConfigs(name string, d *Daemon) error {
 }
 
 func patchStorageApiLxdOnBtrfs(name string, d *Daemon) error {
-	pools, err := d.db.StoragePools()
+	pools, err := d.cluster.StoragePools()
 	if err != nil {
 		if err == db.NoSuchObjectError {
 			return nil
@@ -2122,7 +2126,7 @@ func patchStorageApiLxdOnBtrfs(name string, d *Daemon) error {
 	}
 
 	for _, poolName := range pools {
-		_, pool, err := d.db.StoragePoolGet(poolName)
+		_, pool, err := d.cluster.StoragePoolGet(poolName)
 		if err != nil {
 			logger.Errorf("Failed to query database: %s", err)
 			return err
@@ -2157,7 +2161,7 @@ func patchStorageApiLxdOnBtrfs(name string, d *Daemon) error {
 		pool.Config["source"] = getStoragePoolMountPoint(poolName)
 
 		// Update the storage pool config.
-		err = d.db.StoragePoolUpdate(poolName, pool.Description, pool.Config)
+		err = d.cluster.StoragePoolUpdate(poolName, pool.Description, pool.Config)
 		if err != nil {
 			return err
 		}
@@ -2169,7 +2173,7 @@ func patchStorageApiLxdOnBtrfs(name string, d *Daemon) error {
 }
 
 func patchStorageApiDetectLVSize(name string, d *Daemon) error {
-	pools, err := d.db.StoragePools()
+	pools, err := d.cluster.StoragePools()
 	if err != nil {
 		if err == db.NoSuchObjectError {
 			return nil
@@ -2179,7 +2183,7 @@ func patchStorageApiDetectLVSize(name string, d *Daemon) error {
 	}
 
 	for _, poolName := range pools {
-		poolID, pool, err := d.db.StoragePoolGet(poolName)
+		poolID, pool, err := d.cluster.StoragePoolGet(poolName)
 		if err != nil {
 			logger.Errorf("Failed to query database: %s", err)
 			return err
@@ -2202,7 +2206,7 @@ func patchStorageApiDetectLVSize(name string, d *Daemon) error {
 		}
 
 		// Get all storage volumes on the storage pool.
-		volumes, err := d.db.StoragePoolVolumesGet(poolID, supportedVolumeTypes)
+		volumes, err := d.cluster.StoragePoolVolumesGet(poolID, supportedVolumeTypes)
 		if err != nil {
 			if err == db.NoSuchObjectError {
 				continue
@@ -2249,7 +2253,7 @@ func patchStorageApiDetectLVSize(name string, d *Daemon) error {
 			// exist in the db, so it's safe to ignore the error.
 			volumeType, _ := storagePoolVolumeTypeNameToType(volume.Type)
 			// Update the volume config.
-			err = d.db.StoragePoolVolumeUpdate(volume.Name, volumeType, poolID, volume.Description, volume.Config)
+			err = d.cluster.StoragePoolVolumeUpdate(volume.Name, volumeType, poolID, volume.Description, volume.Config)
 			if err != nil {
 				return err
 			}
@@ -2260,11 +2264,11 @@ func patchStorageApiDetectLVSize(name string, d *Daemon) error {
 }
 
 func patchStorageApiInsertZfsDriver(name string, d *Daemon) error {
-	return d.db.StoragePoolInsertZfsDriver()
+	return d.cluster.StoragePoolInsertZfsDriver()
 }
 
 func patchStorageZFSnoauto(name string, d *Daemon) error {
-	pools, err := d.db.StoragePools()
+	pools, err := d.cluster.StoragePools()
 	if err != nil {
 		if err == db.NoSuchObjectError {
 			return nil
@@ -2274,7 +2278,7 @@ func patchStorageZFSnoauto(name string, d *Daemon) error {
 	}
 
 	for _, poolName := range pools {
-		_, pool, err := d.db.StoragePoolGet(poolName)
+		_, pool, err := d.cluster.StoragePoolGet(poolName)
 		if err != nil {
 			logger.Errorf("Failed to query database: %s", err)
 			return err
@@ -2327,7 +2331,7 @@ func patchStorageZFSnoauto(name string, d *Daemon) error {
 }
 
 func patchStorageZFSVolumeSize(name string, d *Daemon) error {
-	pools, err := d.db.StoragePools()
+	pools, err := d.cluster.StoragePools()
 	if err != nil && err == db.NoSuchObjectError {
 		// No pool was configured in the previous update. So we're on a
 		// pristine LXD instance.
@@ -2339,7 +2343,7 @@ func patchStorageZFSVolumeSize(name string, d *Daemon) error {
 	}
 
 	for _, poolName := range pools {
-		poolID, pool, err := d.db.StoragePoolGet(poolName)
+		poolID, pool, err := d.cluster.StoragePoolGet(poolName)
 		if err != nil {
 			logger.Errorf("Failed to query database: %s", err)
 			return err
@@ -2351,7 +2355,7 @@ func patchStorageZFSVolumeSize(name string, d *Daemon) error {
 		}
 
 		// Get all storage volumes on the storage pool.
-		volumes, err := d.db.StoragePoolVolumesGet(poolID, supportedVolumeTypes)
+		volumes, err := d.cluster.StoragePoolVolumesGet(poolID, supportedVolumeTypes)
 		if err != nil {
 			if err == db.NoSuchObjectError {
 				continue
@@ -2379,7 +2383,7 @@ func patchStorageZFSVolumeSize(name string, d *Daemon) error {
 			// exist in the db, so it's safe to ignore the error.
 			volumeType, _ := storagePoolVolumeTypeNameToType(volume.Type)
 			// Update the volume config.
-			err = d.db.StoragePoolVolumeUpdate(volume.Name,
+			err = d.cluster.StoragePoolVolumeUpdate(volume.Name,
 				volumeType, poolID, volume.Description,
 				volume.Config)
 			if err != nil {
@@ -2413,7 +2417,7 @@ func patchNetworkDnsmasqHosts(name string, d *Daemon) error {
 }
 
 func patchStorageApiDirBindMount(name string, d *Daemon) error {
-	pools, err := d.db.StoragePools()
+	pools, err := d.cluster.StoragePools()
 	if err != nil && err == db.NoSuchObjectError {
 		// No pool was configured in the previous update. So we're on a
 		// pristine LXD instance.
@@ -2425,7 +2429,7 @@ func patchStorageApiDirBindMount(name string, d *Daemon) error {
 	}
 
 	for _, poolName := range pools {
-		_, pool, err := d.db.StoragePoolGet(poolName)
+		_, pool, err := d.cluster.StoragePoolGet(poolName)
 		if err != nil {
 			logger.Errorf("Failed to query database: %s", err)
 			return err
@@ -2499,7 +2503,7 @@ func patchFixUploadedAt(name string, d *Daemon) error {
 }
 
 func patchStorageApiCephSizeRemove(name string, d *Daemon) error {
-	pools, err := d.db.StoragePools()
+	pools, err := d.cluster.StoragePools()
 	if err != nil && err == db.NoSuchObjectError {
 		// No pool was configured in the previous update. So we're on a
 		// pristine LXD instance.
@@ -2511,7 +2515,7 @@ func patchStorageApiCephSizeRemove(name string, d *Daemon) error {
 	}
 
 	for _, poolName := range pools {
-		_, pool, err := d.db.StoragePoolGet(poolName)
+		_, pool, err := d.cluster.StoragePoolGet(poolName)
 		if err != nil {
 			logger.Errorf("Failed to query database: %s", err)
 			return err
@@ -2528,7 +2532,7 @@ func patchStorageApiCephSizeRemove(name string, d *Daemon) error {
 		}
 
 		// Update the config in the database.
-		err = d.db.StoragePoolUpdate(poolName, pool.Description,
+		err = d.cluster.StoragePoolUpdate(poolName, pool.Description,
 			pool.Config)
 		if err != nil {
 			return err
diff --git a/lxd/profiles.go b/lxd/profiles.go
index e92a08034..6bafb47f0 100644
--- a/lxd/profiles.go
+++ b/lxd/profiles.go
@@ -84,7 +84,7 @@ func profilesPost(d *Daemon, r *http.Request) Response {
 		return BadRequest(err)
 	}
 
-	err = containerValidDevices(d.db, req.Devices, true, false)
+	err = containerValidDevices(d.cluster, req.Devices, true, false)
 	if err != nil {
 		return BadRequest(err)
 	}
diff --git a/lxd/profiles_utils.go b/lxd/profiles_utils.go
index 9d0c3e0d4..e45670847 100644
--- a/lxd/profiles_utils.go
+++ b/lxd/profiles_utils.go
@@ -15,7 +15,7 @@ func doProfileUpdate(d *Daemon, name string, id int64, profile *api.Profile, req
 		return BadRequest(err)
 	}
 
-	err = containerValidDevices(d.db, req.Devices, true, false)
+	err = containerValidDevices(d.cluster, req.Devices, true, false)
 	if err != nil {
 		return BadRequest(err)
 	}
diff --git a/lxd/storage.go b/lxd/storage.go
index d6c697063..aff9fcf44 100644
--- a/lxd/storage.go
+++ b/lxd/storage.go
@@ -283,7 +283,7 @@ func storageCoreInit(driver string) (storage, error) {
 
 func storageInit(s *state.State, poolName string, volumeName string, volumeType int) (storage, error) {
 	// Load the storage pool.
-	poolID, pool, err := s.Node.StoragePoolGet(poolName)
+	poolID, pool, err := s.Cluster.StoragePoolGet(poolName)
 	if err != nil {
 		return nil, err
 	}
@@ -298,7 +298,7 @@ func storageInit(s *state.State, poolName string, volumeName string, volumeType
 	// Load the storage volume.
 	volume := &api.StorageVolume{}
 	if volumeName != "" && volumeType >= 0 {
-		_, volume, err = s.Node.StoragePoolVolumeGetType(volumeName, volumeType, poolID)
+		_, volume, err = s.Cluster.StoragePoolVolumeGetType(volumeName, volumeType, poolID)
 		if err != nil {
 			return nil, err
 		}
@@ -316,7 +316,6 @@ func storageInit(s *state.State, poolName string, volumeName string, volumeType
 		btrfs.pool = pool
 		btrfs.volume = volume
 		btrfs.s = s
-		btrfs.db = s.Node
 		err = btrfs.StoragePoolInit()
 		if err != nil {
 			return nil, err
@@ -328,7 +327,6 @@ func storageInit(s *state.State, poolName string, volumeName string, volumeType
 		dir.pool = pool
 		dir.volume = volume
 		dir.s = s
-		dir.db = s.Node
 		err = dir.StoragePoolInit()
 		if err != nil {
 			return nil, err
@@ -340,7 +338,6 @@ func storageInit(s *state.State, poolName string, volumeName string, volumeType
 		ceph.pool = pool
 		ceph.volume = volume
 		ceph.s = s
-		ceph.db = s.Node
 		err = ceph.StoragePoolInit()
 		if err != nil {
 			return nil, err
@@ -352,7 +349,6 @@ func storageInit(s *state.State, poolName string, volumeName string, volumeType
 		lvm.pool = pool
 		lvm.volume = volume
 		lvm.s = s
-		lvm.db = s.Node
 		err = lvm.StoragePoolInit()
 		if err != nil {
 			return nil, err
@@ -364,7 +360,6 @@ func storageInit(s *state.State, poolName string, volumeName string, volumeType
 		mock.pool = pool
 		mock.volume = volume
 		mock.s = s
-		mock.db = s.Node
 		err = mock.StoragePoolInit()
 		if err != nil {
 			return nil, err
@@ -376,7 +371,6 @@ func storageInit(s *state.State, poolName string, volumeName string, volumeType
 		zfs.pool = pool
 		zfs.volume = volume
 		zfs.s = s
-		zfs.db = s.Node
 		err = zfs.StoragePoolInit()
 		if err != nil {
 			return nil, err
@@ -517,11 +511,11 @@ func storagePoolVolumeAttachInit(s *state.State, poolName string, volumeName str
 
 	st.SetStoragePoolVolumeWritable(&poolVolumePut)
 
-	poolID, err := s.Node.StoragePoolGetID(poolName)
+	poolID, err := s.Cluster.StoragePoolGetID(poolName)
 	if err != nil {
 		return nil, err
 	}
-	err = s.Node.StoragePoolVolumeUpdate(volumeName, volumeType, poolID, poolVolumePut.Description, poolVolumePut.Config)
+	err = s.Cluster.StoragePoolVolumeUpdate(volumeName, volumeType, poolID, poolVolumePut.Description, poolVolumePut.Config)
 	if err != nil {
 		return nil, err
 	}
@@ -544,7 +538,7 @@ func storagePoolVolumeContainerCreateInit(s *state.State, poolName string, conta
 
 func storagePoolVolumeContainerLoadInit(s *state.State, containerName string) (storage, error) {
 	// Get the storage pool of a given container.
-	poolName, err := s.Node.ContainerPool(containerName)
+	poolName, err := s.Cluster.ContainerPool(containerName)
 	if err != nil {
 		return nil, err
 	}
@@ -810,7 +804,7 @@ func StorageProgressWriter(op *operation, key string, description string) func(i
 }
 
 func SetupStorageDriver(s *state.State, forceCheck bool) error {
-	pools, err := s.Node.StoragePools()
+	pools, err := s.Cluster.StoragePools()
 	if err != nil {
 		if err == db.NoSuchObjectError {
 			logger.Debugf("No existing storage pools detected.")
@@ -854,11 +848,11 @@ func SetupStorageDriver(s *state.State, forceCheck bool) error {
 	}
 
 	// Update the storage drivers cache in api_1.0.go.
-	storagePoolDriversCacheUpdate(s.Node)
+	storagePoolDriversCacheUpdate(s.Cluster)
 	return nil
 }
 
-func storagePoolDriversCacheUpdate(dbNode *db.Node) {
+func storagePoolDriversCacheUpdate(cluster *db.Cluster) {
 	// Get a list of all storage drivers currently in use
 	// on this LXD instance. Only do this if we have not already done
 	// so, to avoid unnecessarily querying the db. All subsequent
@@ -869,7 +863,7 @@ func storagePoolDriversCacheUpdate(dbNode *db.Node) {
 	// appropriate. (Should be cheaper than querying the db all the time,
 	// especially if we keep adding more storage drivers.)
 
-	drivers, err := dbNode.StoragePoolsGetDrivers()
+	drivers, err := cluster.StoragePoolsGetDrivers()
 	if err != nil && err != db.NoSuchObjectError {
 		return
 	}
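
A note on the hunk above: the "cache" the comment talks about is just an in-memory list of driver names that api_1.0.go rebuilds from this query. A minimal sketch of the pattern, with hypothetical names standing in for the actual api_1.0.go variables:

    // Sketch only; driversMu/storagePoolDrivers are illustrative names.
    var (
    	driversMu          sync.Mutex
    	storagePoolDrivers []string
    )

    func refreshStoragePoolDriversCache(cluster *db.Cluster) {
    	drivers, err := cluster.StoragePoolsGetDrivers()
    	if err != nil && err != db.NoSuchObjectError {
    		return // keep the stale value rather than erroring, as above
    	}
    	driversMu.Lock()
    	storagePoolDrivers = drivers
    	driversMu.Unlock()
    }

The dbStoragePool{Create,Delete}AndUpdateCache helpers further down exist precisely so this refresh runs on every pool mutation.
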
diff --git a/lxd/storage_btrfs.go b/lxd/storage_btrfs.go
index 1218ee271..e4ab42642 100644
--- a/lxd/storage_btrfs.go
+++ b/lxd/storage_btrfs.go
@@ -589,7 +589,7 @@ func (s *storageBtrfs) StoragePoolVolumeDelete() error {
 		}
 	}
 
-	err = s.db.StoragePoolVolumeDelete(
+	err = s.s.Cluster.StoragePoolVolumeDelete(
 		s.volume.Name,
 		storagePoolVolumeTypeCustom,
 		s.poolID)
@@ -685,7 +685,7 @@ func (s *storageBtrfs) StoragePoolVolumeRename(newName string) error {
 	logger.Infof(`Renamed BTRFS storage volume on storage pool "%s" from "%s" to "%s"`,
 		s.pool.Name, s.volume.Name, newName)
 
-	return s.db.StoragePoolVolumeRename(s.volume.Name, newName,
+	return s.s.Cluster.StoragePoolVolumeRename(s.volume.Name, newName,
 		storagePoolVolumeTypeCustom, s.poolID)
 }
 
diff --git a/lxd/storage_ceph.go b/lxd/storage_ceph.go
index 0c6c7f0ba..d5303a936 100644
--- a/lxd/storage_ceph.go
+++ b/lxd/storage_ceph.go
@@ -520,7 +520,7 @@ func (s *storageCeph) StoragePoolVolumeDelete() error {
 			s.volume.Name, s.pool.Name)
 	}
 
-	err = s.db.StoragePoolVolumeDelete(
+	err = s.s.Cluster.StoragePoolVolumeDelete(
 		s.volume.Name,
 		storagePoolVolumeTypeCustom,
 		s.poolID)
@@ -760,7 +760,7 @@ func (s *storageCeph) StoragePoolVolumeRename(newName string) error {
 	logger.Infof(`Renamed CEPH storage volume on OSD storage pool "%s" from "%s" to "%s"`,
 		s.pool.Name, s.volume.Name, newName)
 
-	return s.db.StoragePoolVolumeRename(s.volume.Name, newName,
+	return s.s.Cluster.StoragePoolVolumeRename(s.volume.Name, newName,
 		storagePoolVolumeTypeCustom, s.poolID)
 }
 
@@ -972,7 +972,7 @@ func (s *storageCeph) ContainerCreateFromImage(container container, fingerprint
 			fingerprint, storagePoolVolumeTypeNameImage, s.UserName)
 
 		if ok {
-			_, volume, err := s.s.Node.StoragePoolVolumeGetType(fingerprint, db.StoragePoolVolumeTypeImage, s.poolID)
+			_, volume, err := s.s.Cluster.StoragePoolVolumeGetType(fingerprint, db.StoragePoolVolumeTypeImage, s.poolID)
 			if err != nil {
 				return err
 			}
@@ -2758,7 +2758,7 @@ func (s *storageCeph) StorageEntitySetQuota(volumeType int, size int64, data int
 
 	// Update the database
 	s.volume.Config["size"] = shared.GetByteSizeString(size, 0)
-	err = s.db.StoragePoolVolumeUpdate(
+	err = s.s.Cluster.StoragePoolVolumeUpdate(
 		s.volume.Name,
 		volumeType,
 		s.poolID,
diff --git a/lxd/storage_dir.go b/lxd/storage_dir.go
index 78e562fa7..c5aadf74d 100644
--- a/lxd/storage_dir.go
+++ b/lxd/storage_dir.go
@@ -359,7 +359,7 @@ func (s *storageDir) StoragePoolVolumeDelete() error {
 		return err
 	}
 
-	err = s.db.StoragePoolVolumeDelete(
+	err = s.s.Cluster.StoragePoolVolumeDelete(
 		s.volume.Name,
 		storagePoolVolumeTypeCustom,
 		s.poolID)
@@ -433,7 +433,7 @@ func (s *storageDir) StoragePoolVolumeRename(newName string) error {
 	logger.Infof(`Renamed DIR storage volume on storage pool "%s" from "%s" to "%s"`,
 		s.pool.Name, s.volume.Name, newName)
 
-	return s.db.StoragePoolVolumeRename(s.volume.Name, newName,
+	return s.s.Cluster.StoragePoolVolumeRename(s.volume.Name, newName,
 		storagePoolVolumeTypeCustom, s.poolID)
 }
 
diff --git a/lxd/storage_lvm.go b/lxd/storage_lvm.go
index 95c596b1c..aa648e1f0 100644
--- a/lxd/storage_lvm.go
+++ b/lxd/storage_lvm.go
@@ -284,7 +284,7 @@ func (s *storageLvm) StoragePoolCreate() error {
 		}
 
 		// Check that we don't already use this volume group.
-		inUse, user, err := lxdUsesPool(s.db, poolName, s.pool.Driver, "lvm.vg_name")
+		inUse, user, err := lxdUsesPool(s.s.Cluster, poolName, s.pool.Driver, "lvm.vg_name")
 		if err != nil {
 			return err
 		}
@@ -555,7 +555,7 @@ func (s *storageLvm) StoragePoolVolumeDelete() error {
 		}
 	}
 
-	err = s.db.StoragePoolVolumeDelete(
+	err = s.s.Cluster.StoragePoolVolumeDelete(
 		s.volume.Name,
 		storagePoolVolumeTypeCustom,
 		s.poolID)
@@ -852,7 +852,7 @@ func (s *storageLvm) StoragePoolVolumeRename(newName string) error {
 	logger.Infof(`Renamed LVM storage volume on storage pool "%s" from "%s" to "%s"`,
 		s.pool.Name, s.volume.Name, newName)
 
-	return s.db.StoragePoolVolumeRename(s.volume.Name, newName,
+	return s.s.Cluster.StoragePoolVolumeRename(s.volume.Name, newName,
 		storagePoolVolumeTypeCustom, s.poolID)
 }
 
@@ -1794,7 +1794,7 @@ func (s *storageLvm) StorageEntitySetQuota(volumeType int, size int64, data inte
 
 	// Update the database
 	s.volume.Config["size"] = shared.GetByteSizeString(size, 0)
-	err = s.db.StoragePoolVolumeUpdate(
+	err = s.s.Cluster.StoragePoolVolumeUpdate(
 		s.volume.Name,
 		volumeType,
 		s.poolID,
diff --git a/lxd/storage_lvm_utils.go b/lxd/storage_lvm_utils.go
index 261e457b9..52878c689 100644
--- a/lxd/storage_lvm_utils.go
+++ b/lxd/storage_lvm_utils.go
@@ -497,7 +497,7 @@ func (s *storageLvm) containerCreateFromImageThinLv(c container, fp string) erro
 		var imgerr error
 		ok, _ := storageLVExists(imageLvmDevPath)
 		if ok {
-			_, volume, err := s.s.Node.StoragePoolVolumeGetType(fp, db.StoragePoolVolumeTypeImage, s.poolID)
+			_, volume, err := s.s.Cluster.StoragePoolVolumeGetType(fp, db.StoragePoolVolumeTypeImage, s.poolID)
 			if err != nil {
 				return err
 			}
diff --git a/lxd/storage_pools.go b/lxd/storage_pools.go
index a676cdcad..7484a0eaf 100644
--- a/lxd/storage_pools.go
+++ b/lxd/storage_pools.go
@@ -27,7 +27,7 @@ func storagePoolsGet(d *Daemon, r *http.Request) Response {
 		recursion = 0
 	}
 
-	pools, err := d.db.StoragePools()
+	pools, err := d.cluster.StoragePools()
 	if err != nil && err != db.NoSuchObjectError {
 		return SmartError(err)
 	}
@@ -38,13 +38,13 @@ func storagePoolsGet(d *Daemon, r *http.Request) Response {
 		if recursion == 0 {
 			resultString = append(resultString, fmt.Sprintf("/%s/storage-pools/%s", version.APIVersion, pool))
 		} else {
-			plID, pl, err := d.db.StoragePoolGet(pool)
+			plID, pl, err := d.cluster.StoragePoolGet(pool)
 			if err != nil {
 				continue
 			}
 
 			// Get all users of the storage pool.
-			poolUsedBy, err := storagePoolUsedByGet(d.db, plID, pool)
+			poolUsedBy, err := storagePoolUsedByGet(d.State(), plID, pool)
 			if err != nil {
 				return SmartError(err)
 			}
@@ -104,13 +104,13 @@ func storagePoolGet(d *Daemon, r *http.Request) Response {
 	poolName := mux.Vars(r)["name"]
 
 	// Get the existing storage pool.
-	poolID, pool, err := d.db.StoragePoolGet(poolName)
+	poolID, pool, err := d.cluster.StoragePoolGet(poolName)
 	if err != nil {
 		return SmartError(err)
 	}
 
 	// Get all users of the storage pool.
-	poolUsedBy, err := storagePoolUsedByGet(d.db, poolID, poolName)
+	poolUsedBy, err := storagePoolUsedByGet(d.State(), poolID, poolName)
 	if err != nil && err != db.NoSuchObjectError {
 		return SmartError(err)
 	}
@@ -127,7 +127,7 @@ func storagePoolPut(d *Daemon, r *http.Request) Response {
 	poolName := mux.Vars(r)["name"]
 
 	// Get the existing storage pool.
-	_, dbInfo, err := d.db.StoragePoolGet(poolName)
+	_, dbInfo, err := d.cluster.StoragePoolGet(poolName)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -165,7 +165,7 @@ func storagePoolPatch(d *Daemon, r *http.Request) Response {
 	poolName := mux.Vars(r)["name"]
 
 	// Get the existing network
-	_, dbInfo, err := d.db.StoragePoolGet(poolName)
+	_, dbInfo, err := d.cluster.StoragePoolGet(poolName)
 	if dbInfo != nil {
 		return SmartError(err)
 	}
@@ -214,14 +214,14 @@ func storagePoolPatch(d *Daemon, r *http.Request) Response {
 func storagePoolDelete(d *Daemon, r *http.Request) Response {
 	poolName := mux.Vars(r)["name"]
 
-	poolID, err := d.db.StoragePoolGetID(poolName)
+	poolID, err := d.cluster.StoragePoolGetID(poolName)
 	if err != nil {
 		return NotFound
 	}
 
 	// Check if the storage pool has any volumes associated with it; if so,
 	// error out.
-	volumeCount, err := d.db.StoragePoolVolumesGetNames(poolID)
+	volumeCount, err := d.cluster.StoragePoolVolumesGetNames(poolID)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -250,7 +250,7 @@ func storagePoolDelete(d *Daemon, r *http.Request) Response {
 		return InternalError(err)
 	}
 
-	err = dbStoragePoolDeleteAndUpdateCache(d.db, poolName)
+	err = dbStoragePoolDeleteAndUpdateCache(d.cluster, poolName)
 	if err != nil {
 		return SmartError(err)
 	}
diff --git a/lxd/storage_pools_utils.go b/lxd/storage_pools_utils.go
index 99dd4f690..77476fcb1 100644
--- a/lxd/storage_pools_utils.go
+++ b/lxd/storage_pools_utils.go
@@ -62,7 +62,7 @@ func storagePoolUpdate(state *state.State, name, newDescription string, newConfi
 
 	// Update the database if something changed
 	if len(changedConfig) != 0 || newDescription != oldDescription {
-		err = state.Node.StoragePoolUpdate(name, newDescription, newConfig)
+		err = state.Cluster.StoragePoolUpdate(name, newDescription, newConfig)
 		if err != nil {
 			return err
 		}
@@ -80,15 +80,15 @@ func storagePoolUpdate(state *state.State, name, newDescription string, newConfi
 // /1.0/containers/alp1/snapshots/snap0
 // /1.0/images/cedce20b5b236f1071134beba7a5fd2aa923fda49eea4c66454dd559a5d6e906
 // /1.0/profiles/default
-func storagePoolUsedByGet(dbObj *db.Node, poolID int64, poolName string) ([]string, error) {
+func storagePoolUsedByGet(state *state.State, poolID int64, poolName string) ([]string, error) {
 	// Retrieve all non-custom volumes that exist on this storage pool.
-	volumes, err := dbObj.StoragePoolVolumesGet(poolID, []int{storagePoolVolumeTypeContainer, storagePoolVolumeTypeImage, storagePoolVolumeTypeCustom})
+	volumes, err := state.Cluster.StoragePoolVolumesGet(poolID, []int{storagePoolVolumeTypeContainer, storagePoolVolumeTypeImage, storagePoolVolumeTypeCustom})
 	if err != nil && err != db.NoSuchObjectError {
 		return []string{}, err
 	}
 
 	// Retrieve all profiles that exist on this storage pool.
-	profiles, err := profilesUsingPoolGetNames(dbObj, poolName)
+	profiles, err := profilesUsingPoolGetNames(state.Node, poolName)
 
 	if err != nil {
 		return []string{}, err
@@ -164,7 +164,7 @@ func storagePoolDBCreate(s *state.State, poolName, poolDescription string, drive
 	}
 
 	// Check that the storage pool does not already exist.
-	_, err = s.Node.StoragePoolGetID(poolName)
+	_, err = s.Cluster.StoragePoolGetID(poolName)
 	if err == nil {
 		return fmt.Errorf("The storage pool already exists")
 	}
@@ -187,7 +187,7 @@ func storagePoolDBCreate(s *state.State, poolName, poolDescription string, drive
 	}
 
 	// Create the database entry for the storage pool.
-	_, err = dbStoragePoolCreateAndUpdateCache(s.Node, poolName, poolDescription, driver, config)
+	_, err = dbStoragePoolCreateAndUpdateCache(s.Cluster, poolName, poolDescription, driver, config)
 	if err != nil {
 		return fmt.Errorf("Error inserting %s into database: %s", poolName, err)
 	}
@@ -209,7 +209,7 @@ func storagePoolCreateInternal(state *state.State, poolName, poolDescription str
 		if !tryUndo {
 			return
 		}
-		dbStoragePoolDeleteAndUpdateCache(state.Node, poolName)
+		dbStoragePoolDeleteAndUpdateCache(state.Cluster, poolName)
 	}()
 
 	s, err := storagePoolInit(state, poolName)
@@ -238,7 +238,7 @@ func storagePoolCreateInternal(state *state.State, poolName, poolDescription str
 	configDiff, _ := storageConfigDiff(config, postCreateConfig)
 	if len(configDiff) > 0 {
 		// Create the database entry for the storage pool.
-		err = state.Node.StoragePoolUpdate(poolName, poolDescription, postCreateConfig)
+		err = state.Cluster.StoragePoolUpdate(poolName, poolDescription, postCreateConfig)
 		if err != nil {
 			return fmt.Errorf("Error inserting %s into database: %s", poolName, err)
 		}
@@ -252,7 +252,7 @@ func storagePoolCreateInternal(state *state.State, poolName, poolDescription str
 
 // Helper around the low-level DB API, which also updates the driver names
 // cache.
-func dbStoragePoolCreateAndUpdateCache(db *db.Node, poolName string, poolDescription string, poolDriver string, poolConfig map[string]string) (int64, error) {
+func dbStoragePoolCreateAndUpdateCache(db *db.Cluster, poolName string, poolDescription string, poolDriver string, poolConfig map[string]string) (int64, error) {
 	id, err := db.StoragePoolCreate(poolName, poolDescription, poolDriver, poolConfig)
 	if err != nil {
 		return id, err
@@ -266,7 +266,7 @@ func dbStoragePoolCreateAndUpdateCache(db *db.Node, poolName string, poolDescrip
 
 // Helper around the low-level DB API, which also updates the driver names
 // cache.
-func dbStoragePoolDeleteAndUpdateCache(db *db.Node, poolName string) error {
+func dbStoragePoolDeleteAndUpdateCache(db *db.Cluster, poolName string) error {
 	_, err := db.StoragePoolDelete(poolName)
 	if err != nil {
 		return err
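
Worth calling out in storage_pools_utils.go: storagePoolUsedByGet now takes the whole *state.State rather than a db handle because, at this point in the series, it has to straddle both databases — volumes already live in the cluster (dqlite) database while profiles are still node-local. Condensed sketch of that split (URL assembly and most error handling trimmed):

    // Condensed from the hunk above; not the verbatim function.
    func poolUsedBySketch(s *state.State, poolID int64, poolName string) error {
    	// Volumes: served by the cluster database already.
    	_, err := s.Cluster.StoragePoolVolumesGet(poolID, []int{
    		storagePoolVolumeTypeContainer,
    		storagePoolVolumeTypeImage,
    		storagePoolVolumeTypeCustom,
    	})
    	if err != nil && err != db.NoSuchObjectError {
    		return err
    	}
    	// Profiles: still read from the node-local database for now.
    	_, err = profilesUsingPoolGetNames(s.Node, poolName)
    	return err
    }
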
diff --git a/lxd/storage_shared.go b/lxd/storage_shared.go
index f2e1b692d..caabf3f59 100644
--- a/lxd/storage_shared.go
+++ b/lxd/storage_shared.go
@@ -3,7 +3,6 @@ package main
 import (
 	"fmt"
 
-	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/state"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
@@ -15,8 +14,7 @@ type storageShared struct {
 	sTypeName    string
 	sTypeVersion string
 
-	db *db.Node
-	s  *state.State
+	s *state.State
 
 	poolID int64
 	pool   *api.StoragePool
@@ -109,7 +107,7 @@ func (s *storageShared) createImageDbPoolVolume(fingerprint string) error {
 	}
 
 	// Create a db entry for the storage volume of the image.
-	_, err = s.db.StoragePoolVolumeCreate(fingerprint, "", storagePoolVolumeTypeImage, s.poolID, volumeConfig)
+	_, err = s.s.Cluster.StoragePoolVolumeCreate(fingerprint, "", storagePoolVolumeTypeImage, s.poolID, volumeConfig)
 	if err != nil {
 		// Try to delete the db entry on error.
 		s.deleteImageDbPoolVolume(fingerprint)
@@ -120,7 +118,7 @@ func (s *storageShared) createImageDbPoolVolume(fingerprint string) error {
 }
 
 func (s *storageShared) deleteImageDbPoolVolume(fingerprint string) error {
-	err := s.db.StoragePoolVolumeDelete(fingerprint, storagePoolVolumeTypeImage, s.poolID)
+	err := s.s.Cluster.StoragePoolVolumeDelete(fingerprint, storagePoolVolumeTypeImage, s.poolID)
 	if err != nil {
 		return err
 	}
diff --git a/lxd/storage_utils.go b/lxd/storage_utils.go
index 90019575d..002adcc28 100644
--- a/lxd/storage_utils.go
+++ b/lxd/storage_utils.go
@@ -164,7 +164,7 @@ const imagesDirMode os.FileMode = 0700
 const snapshotsDirMode os.FileMode = 0700
 
 // Detect whether LXD already uses the given storage pool.
-func lxdUsesPool(dbObj *db.Node, onDiskPoolName string, driver string, onDiskProperty string) (bool, string, error) {
+func lxdUsesPool(dbObj *db.Cluster, onDiskPoolName string, driver string, onDiskProperty string) (bool, string, error) {
 	pools, err := dbObj.StoragePools()
 	if err != nil && err != db.NoSuchObjectError {
 		return false, "", err
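
The body of lxdUsesPool continues past this hunk; for review purposes the remainder is, roughly, the following (an assumption reconstructed from call sites such as the lvm.vg_name check in storage_lvm.go, not a verbatim copy):

    	// Assumed shape of the rest of the function: find a pool of the same
    	// driver whose node-specific on-disk property points at the same resource.
    	for _, name := range pools {
    		_, pool, err := dbObj.StoragePoolGet(name)
    		if err != nil {
    			return false, "", err
    		}
    		if pool.Driver != driver {
    			continue
    		}
    		if pool.Config[onDiskProperty] == onDiskPoolName {
    			return true, name, nil
    		}
    	}
    	return false, "", nil
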
diff --git a/lxd/storage_volumes.go b/lxd/storage_volumes.go
index 480029c41..6e227a5fb 100644
--- a/lxd/storage_volumes.go
+++ b/lxd/storage_volumes.go
@@ -28,14 +28,14 @@ func storagePoolVolumesGet(d *Daemon, r *http.Request) Response {
 
 	// Retrieve ID of the storage pool (and check if the storage pool
 	// exists).
-	poolID, err := d.db.StoragePoolGetID(poolName)
+	poolID, err := d.cluster.StoragePoolGetID(poolName)
 	if err != nil {
 		return SmartError(err)
 	}
 
 	// Get all volumes currently attached to the storage pool by ID of the
 	// pool.
-	volumes, err := d.db.StoragePoolVolumesGet(poolID, supportedVolumeTypes)
+	volumes, err := d.cluster.StoragePoolVolumesGet(poolID, supportedVolumeTypes)
 	if err != nil && err != db.NoSuchObjectError {
 		return SmartError(err)
 	}
@@ -95,14 +95,14 @@ func storagePoolVolumesTypeGet(d *Daemon, r *http.Request) Response {
 
 	// Retrieve ID of the storage pool (and check if the storage pool
 	// exists).
-	poolID, err := d.db.StoragePoolGetID(poolName)
+	poolID, err := d.cluster.StoragePoolGetID(poolName)
 	if err != nil {
 		return SmartError(err)
 	}
 
 	// Get the names of all storage volumes of a given volume type currently
 	// attached to the storage pool.
-	volumes, err := d.db.StoragePoolVolumesGetType(volumeType, poolID)
+	volumes, err := d.cluster.StoragePoolVolumesGetType(volumeType, poolID)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -117,7 +117,7 @@ func storagePoolVolumesTypeGet(d *Daemon, r *http.Request) Response {
 			}
 			resultString = append(resultString, fmt.Sprintf("/%s/storage-pools/%s/volumes/%s/%s", version.APIVersion, poolName, apiEndpoint, volume))
 		} else {
-			_, vol, err := d.db.StoragePoolVolumeGetType(volume, volumeType, poolID)
+			_, vol, err := d.cluster.StoragePoolVolumeGetType(volume, volumeType, poolID)
 			if err != nil {
 				continue
 			}
@@ -231,13 +231,13 @@ func storagePoolVolumeTypePost(d *Daemon, r *http.Request) Response {
 
 	// Retrieve ID of the storage pool (and check if the storage pool
 	// exists).
-	poolID, err := d.db.StoragePoolGetID(poolName)
+	poolID, err := d.cluster.StoragePoolGetID(poolName)
 	if err != nil {
 		return SmartError(err)
 	}
 
 	// Check that the name isn't already in use.
-	_, err = d.db.StoragePoolVolumeGetTypeID(req.Name,
+	_, err = d.cluster.StoragePoolVolumeGetTypeID(req.Name,
 		storagePoolVolumeTypeCustom, poolID)
 	if err == nil || err != nil && err != db.NoSuchObjectError {
 		return Conflict
@@ -281,13 +281,13 @@ func storagePoolVolumeTypeGet(d *Daemon, r *http.Request) Response {
 
 	// Get the ID of the storage pool the storage volume is supposed to be
 	// attached to.
-	poolID, err := d.db.StoragePoolGetID(poolName)
+	poolID, err := d.cluster.StoragePoolGetID(poolName)
 	if err != nil {
 		return SmartError(err)
 	}
 
 	// Get the storage volume.
-	_, volume, err := d.db.StoragePoolVolumeGetType(volumeName, volumeType, poolID)
+	_, volume, err := d.cluster.StoragePoolVolumeGetType(volumeName, volumeType, poolID)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -325,13 +325,13 @@ func storagePoolVolumeTypePut(d *Daemon, r *http.Request) Response {
 		return BadRequest(fmt.Errorf("invalid storage volume type %s", volumeTypeName))
 	}
 
-	poolID, pool, err := d.db.StoragePoolGet(poolName)
+	poolID, pool, err := d.cluster.StoragePoolGet(poolName)
 	if err != nil {
 		return SmartError(err)
 	}
 
 	// Get the existing storage volume.
-	_, volume, err := d.db.StoragePoolVolumeGetType(volumeName, volumeType, poolID)
+	_, volume, err := d.cluster.StoragePoolVolumeGetType(volumeName, volumeType, poolID)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -387,13 +387,13 @@ func storagePoolVolumeTypePatch(d *Daemon, r *http.Request) Response {
 
 	// Get the ID of the storage pool the storage volume is supposed to be
 	// attached to.
-	poolID, pool, err := d.db.StoragePoolGet(poolName)
+	poolID, pool, err := d.cluster.StoragePoolGet(poolName)
 	if err != nil {
 		return SmartError(err)
 	}
 
 	// Get the existing storage volume.
-	_, volume, err := d.db.StoragePoolVolumeGetType(volumeName, volumeType, poolID)
+	_, volume, err := d.cluster.StoragePoolVolumeGetType(volumeName, volumeType, poolID)
 	if err != nil {
 		return SmartError(err)
 	}
diff --git a/lxd/storage_volumes_utils.go b/lxd/storage_volumes_utils.go
index c79b1e461..ef691645a 100644
--- a/lxd/storage_volumes_utils.go
+++ b/lxd/storage_volumes_utils.go
@@ -151,14 +151,14 @@ func storagePoolVolumeUpdate(state *state.State, poolName string, volumeName str
 		s.SetStoragePoolVolumeWritable(&newWritable)
 	}
 
-	poolID, err := state.Node.StoragePoolGetID(poolName)
+	poolID, err := state.Cluster.StoragePoolGetID(poolName)
 	if err != nil {
 		return err
 	}
 
 	// Update the database if something changed
 	if len(changedConfig) != 0 || newDescription != oldDescription {
-		err = state.Node.StoragePoolVolumeUpdate(volumeName, volumeType, poolID, newDescription, newConfig)
+		err = state.Cluster.StoragePoolVolumeUpdate(volumeName, volumeType, poolID, newDescription, newConfig)
 		if err != nil {
 			return err
 		}
@@ -302,14 +302,14 @@ func storagePoolVolumeDBCreate(s *state.State, poolName string, volumeName, volu
 	}
 
 	// Load storage pool the volume will be attached to.
-	poolID, poolStruct, err := s.Node.StoragePoolGet(poolName)
+	poolID, poolStruct, err := s.Cluster.StoragePoolGet(poolName)
 	if err != nil {
 		return err
 	}
 
 	// Check that a storage volume of the same storage volume type does not
 	// already exist.
-	volumeID, _ := s.Node.StoragePoolVolumeGetTypeID(volumeName, volumeType, poolID)
+	volumeID, _ := s.Cluster.StoragePoolVolumeGetTypeID(volumeName, volumeType, poolID)
 	if volumeID > 0 {
 		return fmt.Errorf("a storage volume of type %s does already exist", volumeTypeName)
 	}
@@ -331,7 +331,7 @@ func storagePoolVolumeDBCreate(s *state.State, poolName string, volumeName, volu
 	}
 
 	// Create the database entry for the storage volume.
-	_, err = s.Node.StoragePoolVolumeCreate(volumeName, volumeDescription, volumeType, poolID, volumeConfig)
+	_, err = s.Cluster.StoragePoolVolumeCreate(volumeName, volumeDescription, volumeType, poolID, volumeConfig)
 	if err != nil {
 		return fmt.Errorf("Error inserting %s of type %s into database: %s", poolName, volumeTypeName, err)
 	}
@@ -361,7 +361,7 @@ func storagePoolVolumeCreateInternal(state *state.State, poolName string, volume
 	// Create storage volume.
 	err = s.StoragePoolVolumeCreate()
 	if err != nil {
-		state.Node.StoragePoolVolumeDelete(volumeName, volumeType, poolID)
+		state.Cluster.StoragePoolVolumeDelete(volumeName, volumeType, poolID)
 		return err
 	}
 
diff --git a/lxd/storage_zfs.go b/lxd/storage_zfs.go
index 30a616b93..021e5b13e 100644
--- a/lxd/storage_zfs.go
+++ b/lxd/storage_zfs.go
@@ -436,7 +436,7 @@ func (s *storageZfs) StoragePoolVolumeDelete() error {
 		}
 	}
 
-	err := s.db.StoragePoolVolumeDelete(
+	err := s.s.Cluster.StoragePoolVolumeDelete(
 		s.volume.Name,
 		storagePoolVolumeTypeCustom,
 		s.poolID)
@@ -640,7 +640,7 @@ func (s *storageZfs) StoragePoolVolumeRename(newName string) error {
 	logger.Infof(`Renamed ZFS storage volume on storage pool "%s" from "%s" to "%s"`,
 		s.pool.Name, s.volume.Name, newName)
 
-	return s.db.StoragePoolVolumeRename(s.volume.Name, newName,
+	return s.s.Cluster.StoragePoolVolumeRename(s.volume.Name, newName,
 		storagePoolVolumeTypeCustom, s.poolID)
 }
 
diff --git a/test/includes/lxd.sh b/test/includes/lxd.sh
index b13778b55..15fc96190 100644
--- a/test/includes/lxd.sh
+++ b/test/includes/lxd.sh
@@ -195,16 +195,16 @@ kill_lxd() {
         check_empty_table "${daemon_dir}/lxd.db" "profiles_config"
         check_empty_table "${daemon_dir}/lxd.db" "profiles_devices"
         check_empty_table "${daemon_dir}/lxd.db" "profiles_devices_config"
-        check_empty_table "${daemon_dir}/lxd.db" "storage_pools"
-        check_empty_table "${daemon_dir}/lxd.db" "storage_pools_config"
-        check_empty_table "${daemon_dir}/lxd.db" "storage_volumes"
-        check_empty_table "${daemon_dir}/lxd.db" "storage_volumes_config"
 
         echo "==> Checking for leftover cluster DB entries"
         # FIXME: we should not use the command line sqlite client, since it's
         #        not compatible with dqlite
         check_empty_table "${daemon_dir}/raft/db.bin" "networks"
         check_empty_table "${daemon_dir}/raft/db.bin" "networks_config"
+        check_empty_table "${daemon_dir}/raft/db.bin" "storage_pools"
+        check_empty_table "${daemon_dir}/raft/db.bin" "storage_pools_config"
+        check_empty_table "${daemon_dir}/raft/db.bin" "storage_volumes"
+        check_empty_table "${daemon_dir}/raft/db.bin" "storage_volumes_config"
     fi
 
     # teardown storage
diff --git a/test/suites/backup.sh b/test/suites/backup.sh
index 150c14be0..84b304836 100644
--- a/test/suites/backup.sh
+++ b/test/suites/backup.sh
@@ -18,7 +18,7 @@ test_container_import() {
     lxd import ctImport --force
     kill -9 "${pid}"
     sqlite3 "${LXD_DIR}/lxd.db" "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport'"
-    sqlite3 "${LXD_DIR}/lxd.db" "PRAGMA foreign_keys=ON; DELETE FROM storage_volumes WHERE name='ctImport'"
+    lxd sql "PRAGMA foreign_keys=ON; DELETE FROM storage_volumes WHERE name='ctImport'"
     lxd import ctImport --force
     lxc start ctImport
     lxc delete --force ctImport
@@ -65,7 +65,7 @@ test_container_import() {
     kill -9 "${pid}"
     sqlite3 "${LXD_DIR}/lxd.db" "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport'"
     sqlite3 "${LXD_DIR}/lxd.db" "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport/snap0'"
-    sqlite3 "${LXD_DIR}/lxd.db" "PRAGMA foreign_keys=ON; DELETE FROM storage_volumes WHERE name='ctImport'"
+    lxd sql "PRAGMA foreign_keys=ON; DELETE FROM storage_volumes WHERE name='ctImport'"
     ! lxd import ctImport
     lxd import ctImport --force
     lxc info ctImport | grep snap0
@@ -79,8 +79,8 @@ test_container_import() {
     kill -9 "${pid}"
     sqlite3 "${LXD_DIR}/lxd.db" "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport'"
     sqlite3 "${LXD_DIR}/lxd.db" "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport/snap0'"
-    sqlite3 "${LXD_DIR}/lxd.db" "PRAGMA foreign_keys=ON; DELETE FROM storage_volumes WHERE name='ctImport'"
-    sqlite3 "${LXD_DIR}/lxd.db" "PRAGMA foreign_keys=ON; DELETE FROM storage_volumes WHERE name='ctImport/snap0'"
+    lxd sql "PRAGMA foreign_keys=ON; DELETE FROM storage_volumes WHERE name='ctImport'"
+    lxd sql "PRAGMA foreign_keys=ON; DELETE FROM storage_volumes WHERE name='ctImport/snap0'"
     lxd import ctImport
     lxd import ctImport --force
     lxc info ctImport | grep snap0
@@ -100,7 +100,7 @@ test_container_import() {
     kill -9 "${pid}"
     sqlite3 "${LXD_DIR}/lxd.db" "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport'"
     sqlite3 "${LXD_DIR}/lxd.db" "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport/snap0'"
-    sqlite3 "${LXD_DIR}/lxd.db" "PRAGMA foreign_keys=ON; DELETE FROM storage_volumes WHERE name='ctImport'"
+    lxd sql "PRAGMA foreign_keys=ON; DELETE FROM storage_volumes WHERE name='ctImport'"
     ! lxd import ctImport
     lxd import ctImport --force
     lxc info ctImport | grep snap0
diff --git a/test/suites/database_update.sh b/test/suites/database_update.sh
index 4af380d32..a1f00a835 100644
--- a/test/suites/database_update.sh
+++ b/test/suites/database_update.sh
@@ -9,12 +9,12 @@ test_database_update(){
   spawn_lxd "${LXD_MIGRATE_DIR}" true
 
   # Assert there are enough tables.
-  expected_tables=22
+  expected_tables=18
   tables=$(sqlite3 "${MIGRATE_DB}" ".dump" | grep -c "CREATE TABLE")
   [ "${tables}" -eq "${expected_tables}" ] || { echo "FAIL: Wrong number of tables after database migration. Found: ${tables}, expected ${expected_tables}"; false; }
 
-  # There should be 15 "ON DELETE CASCADE" occurrences
-  expected_cascades=14
+  # There should be 11 "ON DELETE CASCADE" occurrences
+  expected_cascades=11
   cascades=$(sqlite3 "${MIGRATE_DB}" ".dump" | grep -c "ON DELETE CASCADE")
   [ "${cascades}" -eq "${expected_cascades}" ] || { echo "FAIL: Wrong number of ON DELETE CASCADE foreign keys. Found: ${cascades}, exected: ${expected_cascades}"; false; }
 

From 2314383ee5c7327e2a0651ac6469e5f0f609c149 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 25 Oct 2017 08:45:21 +0000
Subject: [PATCH 058/227] Ask about storage pool configs when joining a
 cluster

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_cluster.go         | 13 +++++++++++++
 lxd/main_init.go           | 23 +++++++++++++++++++++++
 lxd/main_init_test.go      | 14 ++++++++++++--
 shared/api/cluster.go      |  3 ++-
 shared/cmd/context.go      |  4 +++-
 shared/cmd/context_test.go |  1 +
 6 files changed, 54 insertions(+), 4 deletions(-)

diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index 28b3c46f7..66641ef50 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -46,6 +46,19 @@ func clusterGet(d *Daemon, r *http.Request) Response {
 		cluster.Networks = append(cluster.Networks, *network)
 	}
 
+	// Fill the StoragePools attribute
+	pools, err := d.cluster.StoragePools()
+	if err != nil {
+		return SmartError(err)
+	}
+	for _, name := range pools {
+		_, pool, err := d.cluster.StoragePoolGet(name)
+		if err != nil {
+			return SmartError(err)
+		}
+		cluster.StoragePools = append(cluster.StoragePools, *pool)
+	}
+
 	return SyncResponse(true, cluster)
 }
 
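
To make the wire change concrete: the /1.0/cluster payload now carries the pools next to the networks. As a Go literal (names and values made up, field names from shared/api):

    // Illustrative only: the kind of value clusterGet now hands to SyncResponse.
    func exampleClusterPayload() api.Cluster {
    	pool := api.StoragePool{Name: "mypool", Driver: "dir"}
    	pool.Config = map[string]string{"source": ""} // node-specific, asked again on join
    	return api.Cluster{
    		StoragePools: []api.StoragePool{pool},
    		Networks:     []api.Network{{Name: "lxdbr0"}},
    	}
    }
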
diff --git a/lxd/main_init.go b/lxd/main_init.go
index 12c11c1d5..1c36976d3 100644
--- a/lxd/main_init.go
+++ b/lxd/main_init.go
@@ -183,6 +183,10 @@ func (cmd *CmdInit) fillDataInteractive(data *cmdInitData, client lxd.ContainerS
 			if err != nil {
 				return err
 			}
+			data.Pools, err = cmd.askClusteringStoragePools(cluster)
+			if err != nil {
+				return err
+			}
 			data.Networks, err = cmd.askClusteringNetworks(cluster)
 			if err != nil {
 				return err
@@ -836,6 +840,25 @@ join:
 	return params, nil
 }
 
+func (cmd *CmdInit) askClusteringStoragePools(cluster *api.Cluster) ([]api.StoragePoolsPost, error) {
+	pools := make([]api.StoragePoolsPost, len(cluster.StoragePools))
+	for i, pool := range cluster.StoragePools {
+		post := api.StoragePoolsPost{}
+		post.Name = pool.Name
+		post.Driver = pool.Driver
+		post.Config = pool.Config
+		// The only config key to ask about is 'source', which is the only node-specific one.
+		key := "source"
+		question := fmt.Sprintf(
+			`Enter local value for key "%s" of storage pool "%s": `, key, post.Name)
+		// Dummy validator for allowing empty strings.
+		validator := func(string) error { return nil }
+		post.Config[key] = cmd.Context.AskString(question, "", validator)
+		pools[i] = post
+	}
+	return pools, nil
+}
+
 func (cmd *CmdInit) askClusteringNetworks(cluster *api.Cluster) ([]api.NetworksPost, error) {
 	networks := make([]api.NetworksPost, len(cluster.Networks))
 	for i, network := range cluster.Networks {
diff --git a/lxd/main_init_test.go b/lxd/main_init_test.go
index 952af995b..873521f38 100644
--- a/lxd/main_init_test.go
+++ b/lxd/main_init_test.go
@@ -170,13 +170,22 @@ func (suite *cmdInitTestSuite) TestCmdInit_InteractiveClusteringJoin() {
 		"ipv4.nat": "true",
 	}
 	client := f.ClientUnix(leader)
-	suite.Req.Nil(client.CreateNetwork(network))
+	suite.Req.NoError(client.CreateNetwork(network))
+
+	pool := api.StoragePoolsPost{
+		Name:   "mypool",
+		Driver: "dir",
+	}
+	pool.Config = map[string]string{
+		"source": "",
+	}
+	suite.Req.NoError(client.CreateStoragePool(pool))
 
 	suite.command.PasswordReader = func(int) ([]byte, error) {
 		return []byte("sekret"), nil
 	}
 	port, err := shared.AllocatePort()
-	suite.Req.Nil(err)
+	suite.Req.NoError(err)
 	answers := &cmdInitAnswers{
 		WantClustering:           true,
 		ClusterName:              "rusp",
@@ -186,6 +195,7 @@ func (suite *cmdInitTestSuite) TestCmdInit_InteractiveClusteringJoin() {
 		ClusterAcceptFingerprint: true,
 		ClusterConfirmLosingData: true,
 		ClusterConfig: []string{
+			"",               // storage source
 			"10.23.189.2/24", // ipv4.address
 			"true",           // ipv4.nat
 			"aaaa:bbbb:cccc:dddd::1/64", // ipv6.address
diff --git a/shared/api/cluster.go b/shared/api/cluster.go
index 5000394c5..045411d64 100644
--- a/shared/api/cluster.go
+++ b/shared/api/cluster.go
@@ -2,7 +2,8 @@ package api
 
 // Cluster represents high-level information about a LXD cluster.
 type Cluster struct {
-	Networks []Network
+	StoragePools []StoragePool
+	Networks     []Network
 }
 
 // ClusterPost represents the fields required to bootstrap or join a LXD
diff --git a/shared/cmd/context.go b/shared/cmd/context.go
index 251a5240c..f2659956d 100644
--- a/shared/cmd/context.go
+++ b/shared/cmd/context.go
@@ -3,13 +3,14 @@ package cmd
 import (
 	"bufio"
 	"fmt"
-	"gopkg.in/yaml.v2"
 	"io"
 	"io/ioutil"
 	"os"
 	"strconv"
 	"strings"
 
+	"gopkg.in/yaml.v2"
+
 	"github.com/lxc/lxd/shared"
 )
 
@@ -101,6 +102,7 @@ func (c *Context) AskString(question string, defaultAnswer string, validate func
 				fmt.Fprintf(c.stderr, "Invalid input: %s\n\n", error)
 				continue
 			}
+			return answer
 		}
 		if len(answer) != 0 {
 			return answer
diff --git a/shared/cmd/context_test.go b/shared/cmd/context_test.go
index 1e4b0c6bc..7f73e57fd 100644
--- a/shared/cmd/context_test.go
+++ b/shared/cmd/context_test.go
@@ -129,6 +129,7 @@ func TestAskString(t *testing.T) {
 			}
 			return nil
 		}, "Name?Name?", "Invalid input: ugly name\n\n", "Ted\nJohn", "John"},
+		{"Name?", "", func(string) error { return nil }, "Name?", "", "\n", ""},
 	}
 	for _, c := range cases {
 		streams := cmd.NewMemoryStreams(c.input)
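
A side note on the shared/cmd change in this patch: AskString previously could not return an empty answer even when the caller's validator accepted one — the loop fell through and re-prompted. The added return makes a validated answer final, which the new table case (empty input, permissive validator, empty expected result) pins down. Sketch of the fixed control flow, with stand-in names (readLine is hypothetical, and the default-answer handling is simplified):

    func askStringSketch(readLine func() string, validate func(string) error, defaultAnswer string) string {
    	for {
    		answer := readLine()
    		if validate != nil {
    			if err := validate(answer); err != nil {
    				fmt.Printf("Invalid input: %s\n\n", err)
    				continue
    			}
    			// The fix: once the validator is happy, return, even for "".
    			return answer
    		}
    		if answer != "" {
    			return answer
    		}
    		return defaultAnswer
    	}
    }
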

From beb8220bbaf779884517b078940ca8e06e89e9ef Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 25 Oct 2017 08:55:00 +0000
Subject: [PATCH 059/227] Add local storage pools to cluster database when
 joining

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/membership.go | 41 +++++++++++++++++++++++++++-----
 lxd/db/storage_pools.go   | 59 ++++++++++++++++++++++++++++++++++++++++++++---
 2 files changed, 91 insertions(+), 9 deletions(-)

diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index 50c1de33a..ac1bd6e5a 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -229,12 +229,20 @@ func Join(state *state.State, gateway *Gateway, cert *shared.CertInfo, name stri
 	}
 
 	// Get the local config keys for the cluster networks. It assumes that
-	// the local networks match the cluster networks, if not an error will
-	// be returned.
+	// the local storage pools and networks match the cluster ones; if
+	// not, an error will be returned.
+	var pools map[string]map[string]string
 	var networks map[string]map[string]string
 	err = state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		pools, err = tx.StoragePoolConfigs()
+		if err != nil {
+			return err
+		}
 		networks, err = tx.NetworkConfigs()
-		return err
+		if err != nil {
+			return err
+		}
+		return nil
 	})
 	if err != nil {
 		return err
@@ -285,15 +293,36 @@ func Join(state *state.State, gateway *Gateway, cert *shared.CertInfo, name stri
 	// Make sure we can actually connect to the cluster database through
 	// the network endpoint. This also makes the Go SQL pooling system
 	// invalidate the old connection, so new queries will be executed over
-	// the new gRPC network connection. Also, update the networks table
-	// with our local configuration.
+	// the new gRPC network connection. Also, update the storage_pools and
+	// networks tables with our local configuration.
 	err = state.Cluster.Transaction(func(tx *db.ClusterTx) error {
 		node, err := tx.Node(address)
 		if err != nil {
 			return errors.Wrap(err, "failed to get ID of joining node")
 		}
 		state.Cluster.ID(node.ID)
-		ids, err := tx.NetworkIDs()
+
+		// Storage pools.
+		ids, err := tx.StoragePoolIDs()
+		if err != nil {
+			return errors.Wrap(err, "failed to get cluster storage pool IDs")
+		}
+		for name, id := range ids {
+			config, ok := pools[name]
+			if !ok {
+				return fmt.Errorf("joining node has no config for pool %s", name)
+			}
+			// We only need to add the source key, since the other keys are global and
+			// are already there.
+			config = map[string]string{"source": config["source"]}
+			err := tx.StoragePoolConfigAdd(id, node.ID, config)
+			if err != nil {
+				return errors.Wrap(err, "failed to add joining node's pool config")
+			}
+		}
+
+		// Networks.
+		ids, err = tx.NetworkIDs()
 		if err != nil {
 			return errors.Wrap(err, "failed to get cluster network IDs")
 		}
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index 99392f90c..ad2fad60e 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -6,9 +6,62 @@ import (
 
 	_ "github.com/mattn/go-sqlite3"
 
+	"github.com/lxc/lxd/lxd/db/query"
 	"github.com/lxc/lxd/shared/api"
 )
 
+// StoragePoolConfigs returns a map associating each storage pool name to its
+// config values.
+func (c *ClusterTx) StoragePoolConfigs() (map[string]map[string]string, error) {
+	names, err := query.SelectStrings(c.tx, "SELECT name FROM storage_pools")
+	if err != nil {
+		return nil, err
+	}
+	pools := make(map[string]map[string]string, len(names))
+	for _, name := range names {
+		table := `
+storage_pools_config JOIN storage_pools ON storage_pools.id=storage_pools_config.storage_pool_id
+`
+		filter := fmt.Sprintf("storage_pools.name='%s'", name)
+		config, err := query.SelectConfig(c.tx, table, filter)
+		if err != nil {
+			return nil, err
+		}
+		pools[name] = config
+	}
+	return pools, nil
+}
+
+// StoragePoolIDs returns a map associating each storage pool name to its ID.
+func (c *ClusterTx) StoragePoolIDs() (map[string]int64, error) {
+	pools := []struct {
+		id   int64
+		name string
+	}{}
+	dest := func(i int) []interface{} {
+		pools = append(pools, struct {
+			id   int64
+			name string
+		}{})
+		return []interface{}{&pools[i].id, &pools[i].name}
+
+	}
+	err := query.SelectObjects(c.tx, dest, "SELECT id, name FROM storage_pools")
+	if err != nil {
+		return nil, err
+	}
+	ids := map[string]int64{}
+	for _, pool := range pools {
+		ids[pool.name] = pool.id
+	}
+	return ids, nil
+}
+
+// StoragePoolConfigAdd adds a new entry to the storage_pools_config table.
+func (c *ClusterTx) StoragePoolConfigAdd(poolID, nodeID int64, config map[string]string) error {
+	return storagePoolConfigAdd(c.tx, poolID, nodeID, config)
+}
+
 // Get all storage pools.
 func (c *Cluster) StoragePools() ([]string, error) {
 	var name string
@@ -150,7 +203,7 @@ func (c *Cluster) StoragePoolCreate(poolName string, poolDescription string, poo
 		return -1, err
 	}
 
-	err = StoragePoolConfigAdd(tx, id, c.id, poolConfig)
+	err = storagePoolConfigAdd(tx, id, c.id, poolConfig)
 	if err != nil {
 		tx.Rollback()
 		return -1, err
@@ -165,7 +218,7 @@ func (c *Cluster) StoragePoolCreate(poolName string, poolDescription string, poo
 }
 
 // Add new storage pool config.
-func StoragePoolConfigAdd(tx *sql.Tx, poolID, nodeID int64, poolConfig map[string]string) error {
+func storagePoolConfigAdd(tx *sql.Tx, poolID, nodeID int64, poolConfig map[string]string) error {
 	str := "INSERT INTO storage_pools_config (storage_pool_id, node_id, key, value) VALUES(?, ?, ?, ?)"
 	stmt, err := tx.Prepare(str)
 	defer stmt.Close()
@@ -217,7 +270,7 @@ func (c *Cluster) StoragePoolUpdate(poolName, description string, poolConfig map
 		return err
 	}
 
-	err = StoragePoolConfigAdd(tx, poolID, c.id, poolConfig)
+	err = storagePoolConfigAdd(tx, poolID, c.id, poolConfig)
 	if err != nil {
 		tx.Rollback()
 		return err
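
For the record, what a join adds to the shared database is small: the other pool keys are already present (the patch treats them as global), so the joining node contributes exactly one 'source' row per pool, keyed by its node id. As a hypothetical helper using the same INSERT that storagePoolConfigAdd prepares above:

    // Illustrative, not in the patch: the node-specific row added per pool.
    func addJoiningNodeSource(tx *sql.Tx, poolID, nodeID int64, source string) error {
    	_, err := tx.Exec(
    		"INSERT INTO storage_pools_config (storage_pool_id, node_id, key, value) VALUES(?, ?, ?, ?)",
    		poolID, nodeID, "source", source)
    	return err
    }
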

From 9c4a439e2531ebd5462d4255505da2e569e16edc Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 25 Oct 2017 09:27:11 +0000
Subject: [PATCH 060/227] Add certificates table to cluster database

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/cluster/schema.go      |  8 ++++++++
 lxd/db/cluster/update.go      |  8 ++++++++
 lxd/db/cluster/update_test.go | 13 +++++++++++++
 3 files changed, 29 insertions(+)

diff --git a/lxd/db/cluster/schema.go b/lxd/db/cluster/schema.go
index eea04804a..04732fc63 100644
--- a/lxd/db/cluster/schema.go
+++ b/lxd/db/cluster/schema.go
@@ -6,6 +6,14 @@ package cluster
 // modify the database schema, please add a new schema update to update.go
 // and then run 'make update-schema'.
 const freshSchema = `
+CREATE TABLE certificates (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    fingerprint TEXT NOT NULL,
+    type INTEGER NOT NULL,
+    name TEXT NOT NULL,
+    certificate TEXT NOT NULL,
+    UNIQUE (fingerprint)
+);
 CREATE TABLE config (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
     key TEXT NOT NULL,
diff --git a/lxd/db/cluster/update.go b/lxd/db/cluster/update.go
index 2ed8a5a6a..1c902591e 100644
--- a/lxd/db/cluster/update.go
+++ b/lxd/db/cluster/update.go
@@ -30,6 +30,14 @@ var updates = map[int]schema.Update{
 func updateFromV1(tx *sql.Tx) error {
 	// config table
 	stmt := `
+CREATE TABLE certificates (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    fingerprint TEXT NOT NULL,
+    type INTEGER NOT NULL,
+    name TEXT NOT NULL,
+    certificate TEXT NOT NULL,
+    UNIQUE (fingerprint)
+);
 CREATE TABLE config (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
     key TEXT NOT NULL,
diff --git a/lxd/db/cluster/update_test.go b/lxd/db/cluster/update_test.go
index 76ade6df2..bd914db33 100644
--- a/lxd/db/cluster/update_test.go
+++ b/lxd/db/cluster/update_test.go
@@ -31,6 +31,19 @@ func TestUpdateFromV0(t *testing.T) {
 	require.Error(t, err)
 }
 
+func TestUpdateFromV1_Certificates(t *testing.T) {
+	schema := cluster.Schema()
+	db, err := schema.ExerciseUpdate(2, nil)
+	require.NoError(t, err)
+
+	_, err = db.Exec("INSERT INTO certificates VALUES (1, 'abcd:efgh', 1, 'foo', 'FOO')")
+	require.NoError(t, err)
+
+	// Unique constraint on fingerprint.
+	_, err = db.Exec("INSERT INTO certificates VALUES (2, 'abcd:efgh', 2, 'bar', 'BAR')")
+	require.Error(t, err)
+}
+
 func TestUpdateFromV1_Config(t *testing.T) {
 	schema := cluster.Schema()
 	db, err := schema.ExerciseUpdate(2, nil)

From cea94e6dde47eb57cacafee6bec7cbb8cc2da00b Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 25 Oct 2017 09:44:33 +0000
Subject: [PATCH 061/227] Migrate certificates data to cluster database

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/certificates.go            | 22 +++++++++++-----------
 lxd/db/certificates.go         | 20 ++++++++++----------
 lxd/db/migration.go            |  1 +
 lxd/db/migration_test.go       | 12 ++++++++++++
 lxd/db/node/schema.go          |  8 --------
 lxd/db/node/update.go          |  1 +
 test/suites/database_update.sh |  2 +-
 7 files changed, 36 insertions(+), 30 deletions(-)

diff --git a/lxd/certificates.go b/lxd/certificates.go
index fd3c2ea6c..e51e6a88f 100644
--- a/lxd/certificates.go
+++ b/lxd/certificates.go
@@ -26,7 +26,7 @@ func certificatesGet(d *Daemon, r *http.Request) Response {
 	if recursion {
 		certResponses := []api.Certificate{}
 
-		baseCerts, err := d.db.CertificatesGet()
+		baseCerts, err := d.cluster.CertificatesGet()
 		if err != nil {
 			return SmartError(err)
 		}
@@ -57,7 +57,7 @@ func certificatesGet(d *Daemon, r *http.Request) Response {
 func readSavedClientCAList(d *Daemon) {
 	d.clientCerts = []x509.Certificate{}
 
-	dbCerts, err := d.db.CertificatesGet()
+	dbCerts, err := d.cluster.CertificatesGet()
 	if err != nil {
 		logger.Infof("Error reading certificates from database: %s", err)
 		return
@@ -79,7 +79,7 @@ func readSavedClientCAList(d *Daemon) {
 	}
 }
 
-func saveCert(dbObj *db.Node, host string, cert *x509.Certificate) error {
+func saveCert(dbObj *db.Cluster, host string, cert *x509.Certificate) error {
 	baseCert := new(db.CertInfo)
 	baseCert.Fingerprint = shared.CertFingerprint(cert)
 	baseCert.Type = 1
@@ -148,7 +148,7 @@ func certificatesPost(d *Daemon, r *http.Request) Response {
 		}
 	}
 
-	err = saveCert(d.db, name, cert)
+	err = saveCert(d.cluster, name, cert)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -163,7 +163,7 @@ var certificatesCmd = Command{name: "certificates", untrustedPost: true, get: ce
 func certificateFingerprintGet(d *Daemon, r *http.Request) Response {
 	fingerprint := mux.Vars(r)["fingerprint"]
 
-	cert, err := doCertificateGet(d.db, fingerprint)
+	cert, err := doCertificateGet(d.cluster, fingerprint)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -171,7 +171,7 @@ func certificateFingerprintGet(d *Daemon, r *http.Request) Response {
 	return SyncResponseETag(true, cert, cert)
 }
 
-func doCertificateGet(db *db.Node, fingerprint string) (api.Certificate, error) {
+func doCertificateGet(db *db.Cluster, fingerprint string) (api.Certificate, error) {
 	resp := api.Certificate{}
 
 	dbCertInfo, err := db.CertificateGet(fingerprint)
@@ -194,7 +194,7 @@ func doCertificateGet(db *db.Node, fingerprint string) (api.Certificate, error)
 func certificateFingerprintPut(d *Daemon, r *http.Request) Response {
 	fingerprint := mux.Vars(r)["fingerprint"]
 
-	oldEntry, err := doCertificateGet(d.db, fingerprint)
+	oldEntry, err := doCertificateGet(d.cluster, fingerprint)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -216,7 +216,7 @@ func certificateFingerprintPut(d *Daemon, r *http.Request) Response {
 func certificateFingerprintPatch(d *Daemon, r *http.Request) Response {
 	fingerprint := mux.Vars(r)["fingerprint"]
 
-	oldEntry, err := doCertificateGet(d.db, fingerprint)
+	oldEntry, err := doCertificateGet(d.cluster, fingerprint)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -253,7 +253,7 @@ func doCertificateUpdate(d *Daemon, fingerprint string, req api.CertificatePut)
 		return BadRequest(fmt.Errorf("Unknown request type %s", req.Type))
 	}
 
-	err := d.db.CertUpdate(fingerprint, req.Name, 1)
+	err := d.cluster.CertUpdate(fingerprint, req.Name, 1)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -264,12 +264,12 @@ func doCertificateUpdate(d *Daemon, fingerprint string, req api.CertificatePut)
 func certificateFingerprintDelete(d *Daemon, r *http.Request) Response {
 	fingerprint := mux.Vars(r)["fingerprint"]
 
-	certInfo, err := d.db.CertificateGet(fingerprint)
+	certInfo, err := d.cluster.CertificateGet(fingerprint)
 	if err != nil {
 		return NotFound
 	}
 
-	err = d.db.CertDelete(certInfo.Fingerprint)
+	err = d.cluster.CertDelete(certInfo.Fingerprint)
 	if err != nil {
 		return SmartError(err)
 	}
diff --git a/lxd/db/certificates.go b/lxd/db/certificates.go
index ebfd0224f..e773685e5 100644
--- a/lxd/db/certificates.go
+++ b/lxd/db/certificates.go
@@ -11,9 +11,9 @@ type CertInfo struct {
 }
 
 // CertificatesGet returns all certificates from the DB as CertBaseInfo objects.
-func (n *Node) CertificatesGet() (certs []*CertInfo, err error) {
+func (c *Cluster) CertificatesGet() (certs []*CertInfo, err error) {
 	rows, err := dbQuery(
-		n.db,
+		c.db,
 		"SELECT id, fingerprint, type, name, certificate FROM certificates",
 	)
 	if err != nil {
@@ -42,7 +42,7 @@ func (n *Node) CertificatesGet() (certs []*CertInfo, err error) {
 // pass a shortform and will get the full fingerprint.
 // There can never be more than one certificate with a given fingerprint, as it is
 // enforced by a UNIQUE constraint in the schema.
-func (n *Node) CertificateGet(fingerprint string) (cert *CertInfo, err error) {
+func (c *Cluster) CertificateGet(fingerprint string) (cert *CertInfo, err error) {
 	cert = new(CertInfo)
 
 	inargs := []interface{}{fingerprint + "%"}
@@ -61,7 +61,7 @@ func (n *Node) CertificateGet(fingerprint string) (cert *CertInfo, err error) {
 			certificates
 		WHERE fingerprint LIKE ?`
 
-	if err = dbQueryRowScan(n.db, query, inargs, outfmt); err != nil {
+	if err = dbQueryRowScan(c.db, query, inargs, outfmt); err != nil {
 		return nil, err
 	}
 
@@ -70,8 +70,8 @@ func (n *Node) CertificateGet(fingerprint string) (cert *CertInfo, err error) {
 
 // CertSave stores a CertBaseInfo object in the db,
 // it will ignore the ID field from the CertInfo.
-func (n *Node) CertSave(cert *CertInfo) error {
-	tx, err := begin(n.db)
+func (c *Cluster) CertSave(cert *CertInfo) error {
+	tx, err := begin(c.db)
 	if err != nil {
 		return err
 	}
@@ -103,8 +103,8 @@ func (n *Node) CertSave(cert *CertInfo) error {
 }
 
 // CertDelete deletes a certificate from the db.
-func (n *Node) CertDelete(fingerprint string) error {
-	_, err := exec(n.db, "DELETE FROM certificates WHERE fingerprint=?", fingerprint)
+func (c *Cluster) CertDelete(fingerprint string) error {
+	_, err := exec(c.db, "DELETE FROM certificates WHERE fingerprint=?", fingerprint)
 	if err != nil {
 		return err
 	}
@@ -112,8 +112,8 @@ func (n *Node) CertDelete(fingerprint string) error {
 	return nil
 }
 
-func (n *Node) CertUpdate(fingerprint string, certName string, certType int) error {
-	tx, err := begin(n.db)
+func (c *Cluster) CertUpdate(fingerprint string, certName string, certType int) error {
+	tx, err := begin(c.db)
 	if err != nil {
 		return err
 	}
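
Since CertificateGet keeps its LIKE-based lookup across the move to Cluster, shortform fingerprints still resolve, now against the cluster database. A usage sketch (the prefix is made up):

    func fullFingerprint(cluster *db.Cluster) (string, error) {
    	// CertificateGet appends % internally, so an unambiguous prefix suffices.
    	cert, err := cluster.CertificateGet("abcd")
    	if err != nil {
    		return "", err
    	}
    	return cert.Fingerprint, nil // the full stored fingerprint
    }
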
diff --git a/lxd/db/migration.go b/lxd/db/migration.go
index f9d2a7f64..9744bb8a3 100644
--- a/lxd/db/migration.go
+++ b/lxd/db/migration.go
@@ -135,6 +135,7 @@ type Dump struct {
 }
 
 var preClusteringTables = []string{
+	"certificates",
 	"config",
 	"networks",
 	"networks_config",
diff --git a/lxd/db/migration_test.go b/lxd/db/migration_test.go
index b590907b4..8d2b392a7 100644
--- a/lxd/db/migration_test.go
+++ b/lxd/db/migration_test.go
@@ -40,6 +40,17 @@ func TestImportPreClusteringData(t *testing.T) {
 	err = cluster.ImportPreClusteringData(dump)
 	require.NoError(t, err)
 
+	// certificates
+	certs, err := cluster.CertificatesGet()
+	require.NoError(t, err)
+	assert.Len(t, certs, 1)
+	cert := certs[0]
+	assert.Equal(t, 1, cert.ID)
+	assert.Equal(t, "abcd:efgh", cert.Fingerprint)
+	assert.Equal(t, 1, cert.Type)
+	assert.Equal(t, "foo", cert.Name)
+	assert.Equal(t, "FOO", cert.Certificate)
+
 	// config
 	err = cluster.Transaction(func(tx *db.ClusterTx) error {
 		config, err := tx.Config()
@@ -85,6 +96,7 @@ func newPreClusteringTx(t *testing.T) *sql.Tx {
 
 	stmts := []string{
 		preClusteringNodeSchema,
+		"INSERT INTO certificates VALUES (1, 'abcd:efgh', 1, 'foo', 'FOO')",
 		"INSERT INTO config VALUES(1, 'core.https_address', '1.2.3.4:666')",
 		"INSERT INTO networks VALUES(1, 'lxcbr0', 'LXD bridge')",
 		"INSERT INTO networks_config VALUES(1, 1, 'ipv4.nat', 'true')",
diff --git a/lxd/db/node/schema.go b/lxd/db/node/schema.go
index c8105de7f..fd88a6ada 100644
--- a/lxd/db/node/schema.go
+++ b/lxd/db/node/schema.go
@@ -6,14 +6,6 @@ package node
 // modify the database schema, please add a new schema update to update.go
 // and then run 'make update-schema'.
 const freshSchema = `
-CREATE TABLE certificates (
-    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    fingerprint VARCHAR(255) NOT NULL,
-    type INTEGER NOT NULL,
-    name VARCHAR(255) NOT NULL,
-    certificate TEXT NOT NULL,
-    UNIQUE (fingerprint)
-);
 CREATE TABLE config (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
     key VARCHAR(255) NOT NULL,
diff --git a/lxd/db/node/update.go b/lxd/db/node/update.go
index 1e20ca8ba..1153ecf8f 100644
--- a/lxd/db/node/update.go
+++ b/lxd/db/node/update.go
@@ -118,6 +118,7 @@ CREATE TABLE raft_nodes (
     UNIQUE (address)
 );
 DELETE FROM config WHERE NOT key='core.https_address';
+DROP TABLE certificates;
 DROP TABLE networks_config;
 DROP TABLE networks;
 DROP TABLE storage_volumes_config;
diff --git a/test/suites/database_update.sh b/test/suites/database_update.sh
index a1f00a835..b8b46301f 100644
--- a/test/suites/database_update.sh
+++ b/test/suites/database_update.sh
@@ -9,7 +9,7 @@ test_database_update(){
   spawn_lxd "${LXD_MIGRATE_DIR}" true
 
   # Assert there are enough tables.
-  expected_tables=18
+  expected_tables=17
   tables=$(sqlite3 "${MIGRATE_DB}" ".dump" | grep -c "CREATE TABLE")
   [ "${tables}" -eq "${expected_tables}" ] || { echo "FAIL: Wrong number of tables after database migration. Found: ${tables}, expected ${expected_tables}"; false; }
 

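
The migration path for certificates is then: seed the old node schema, dump the whitelisted preClusteringTables, and import into the cluster database. Schematically, mirroring the updated test (LoadPreClusteringData lives in migration.go but is not shown in this hunk, and NewTestCluster is likewise a presumed test helper, so both are assumptions here):

    func TestCertificatesMigrationSketch(t *testing.T) {
    	cluster, cleanup := db.NewTestCluster(t) // presumed test helper
    	defer cleanup()
    	tx := newPreClusteringTx(t) // node DB seeded with one certificate row
    	dump, err := db.LoadPreClusteringData(tx) // loader in migration.go; assumed
    	require.NoError(t, err)
    	require.NoError(t, cluster.ImportPreClusteringData(dump))
    }
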
From dd6833822ad273ea971d087b918d763e92975cdf Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 25 Oct 2017 10:00:04 +0000
Subject: [PATCH 062/227] Add containers, images and profiles tables to the
 cluster database

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/cluster/schema.go      | 125 ++++++++++++++++++++++++++++++++++++++++++
 lxd/db/cluster/update.go      | 125 ++++++++++++++++++++++++++++++++++++++++++
 lxd/db/cluster/update_test.go |  23 ++++++++
 3 files changed, 273 insertions(+)

diff --git a/lxd/db/cluster/schema.go b/lxd/db/cluster/schema.go
index 04732fc63..cfd5ddbd7 100644
--- a/lxd/db/cluster/schema.go
+++ b/lxd/db/cluster/schema.go
@@ -20,6 +20,101 @@ CREATE TABLE config (
     value TEXT,
     UNIQUE (key)
 );
+CREATE TABLE containers (
+    id INTEGER primary key AUTOINCREMENT NOT NULL,
+    node_id INTEGER NOT NULL,
+    name TEXT NOT NULL,
+    architecture INTEGER NOT NULL,
+    type INTEGER NOT NULL,
+    ephemeral INTEGER NOT NULL DEFAULT 0,
+    creation_date DATETIME NOT NULL DEFAULT 0,
+    stateful INTEGER NOT NULL DEFAULT 0,
+    last_use_date DATETIME,
+    description TEXT,
+    UNIQUE (name),
+    FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
+);
+CREATE TABLE containers_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    container_id INTEGER NOT NULL,
+    key TEXT NOT NULL,
+    value TEXT,
+    FOREIGN KEY (container_id) REFERENCES containers (id) ON DELETE CASCADE,
+    UNIQUE (container_id, key)
+);
+CREATE TABLE containers_devices (
+    id INTEGER primary key AUTOINCREMENT NOT NULL,
+    container_id INTEGER NOT NULL,
+    name TEXT NOT NULL,
+    type INTEGER NOT NULL default 0,
+    FOREIGN KEY (container_id) REFERENCES containers (id) ON DELETE CASCADE,
+    UNIQUE (container_id, name)
+);
+CREATE TABLE containers_devices_config (
+    id INTEGER primary key AUTOINCREMENT NOT NULL,
+    container_device_id INTEGER NOT NULL,
+    key TEXT NOT NULL,
+    value TEXT,
+    FOREIGN KEY (container_device_id) REFERENCES containers_devices (id) ON DELETE CASCADE,
+    UNIQUE (container_device_id, key)
+);
+CREATE TABLE containers_profiles (
+    id INTEGER primary key AUTOINCREMENT NOT NULL,
+    container_id INTEGER NOT NULL,
+    profile_id INTEGER NOT NULL,
+    apply_order INTEGER NOT NULL default 0,
+    UNIQUE (container_id, profile_id),
+    FOREIGN KEY (container_id) REFERENCES containers(id) ON DELETE CASCADE,
+    FOREIGN KEY (profile_id) REFERENCES profiles(id) ON DELETE CASCADE
+);
+CREATE TABLE images (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    fingerprint TEXT NOT NULL,
+    filename TEXT NOT NULL,
+    size INTEGER NOT NULL,
+    public INTEGER NOT NULL DEFAULT 0,
+    architecture INTEGER NOT NULL,
+    creation_date DATETIME,
+    expiry_date DATETIME,
+    upload_date DATETIME NOT NULL,
+    cached INTEGER NOT NULL DEFAULT 0,
+    last_use_date DATETIME,
+    auto_update INTEGER NOT NULL DEFAULT 0,
+    UNIQUE (fingerprint)
+);
+CREATE TABLE "images_aliases" (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    name TEXT NOT NULL,
+    image_id INTEGER NOT NULL,
+    description TEXT,
+    FOREIGN KEY (image_id) REFERENCES images (id) ON DELETE CASCADE,
+    UNIQUE (name)
+);
+CREATE TABLE images_nodes (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    image_id INTEGER NOT NULL,
+    node_id INTEGER NOT NULL,
+    UNIQUE (image_id, node_id),
+    FOREIGN KEY (image_id) REFERENCES images (id) ON DELETE CASCADE,
+    FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
+);
+CREATE TABLE images_properties (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    image_id INTEGER NOT NULL,
+    type INTEGER NOT NULL,
+    key TEXT NOT NULL,
+    value TEXT,
+    FOREIGN KEY (image_id) REFERENCES images (id) ON DELETE CASCADE
+);
+CREATE TABLE images_source (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    image_id INTEGER NOT NULL,
+    server TEXT NOT NULL,
+    protocol INTEGER NOT NULL,
+    certificate TEXT NOT NULL,
+    alias TEXT NOT NULL,
+    FOREIGN KEY (image_id) REFERENCES images (id) ON DELETE CASCADE
+);
 CREATE TABLE networks (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
     name TEXT NOT NULL,
@@ -56,6 +151,36 @@ CREATE TABLE nodes (
     UNIQUE (name),
     UNIQUE (address)
 );
+CREATE TABLE profiles (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    name TEXT NOT NULL,
+    description TEXT,
+    UNIQUE (name)
+);
+CREATE TABLE profiles_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    profile_id INTEGER NOT NULL,
+    key TEXT NOT NULL,
+    value TEXT,
+    UNIQUE (profile_id, key),
+    FOREIGN KEY (profile_id) REFERENCES profiles(id) ON DELETE CASCADE
+);
+CREATE TABLE profiles_devices (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    profile_id INTEGER NOT NULL,
+    name TEXT NOT NULL,
+    type INTEGER NOT NULL default 0,
+    UNIQUE (profile_id, name),
+    FOREIGN KEY (profile_id) REFERENCES profiles (id) ON DELETE CASCADE
+);
+CREATE TABLE profiles_devices_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    profile_device_id INTEGER NOT NULL,
+    key TEXT NOT NULL,
+    value TEXT,
+    UNIQUE (profile_device_id, key),
+    FOREIGN KEY (profile_device_id) REFERENCES profiles_devices (id) ON DELETE CASCADE
+);
 CREATE TABLE storage_pools (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
     name TEXT NOT NULL,
diff --git a/lxd/db/cluster/update.go b/lxd/db/cluster/update.go
index 1c902591e..61c52750c 100644
--- a/lxd/db/cluster/update.go
+++ b/lxd/db/cluster/update.go
@@ -44,6 +44,101 @@ CREATE TABLE config (
     value TEXT,
     UNIQUE (key)
 );
+CREATE TABLE containers (
+    id INTEGER primary key AUTOINCREMENT NOT NULL,
+    node_id INTEGER NOT NULL,
+    name TEXT NOT NULL,
+    architecture INTEGER NOT NULL,
+    type INTEGER NOT NULL,
+    ephemeral INTEGER NOT NULL DEFAULT 0,
+    creation_date DATETIME NOT NULL DEFAULT 0,
+    stateful INTEGER NOT NULL DEFAULT 0,
+    last_use_date DATETIME,
+    description TEXT,
+    UNIQUE (name),
+    FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
+);
+CREATE TABLE containers_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    container_id INTEGER NOT NULL,
+    key TEXT NOT NULL,
+    value TEXT,
+    FOREIGN KEY (container_id) REFERENCES containers (id) ON DELETE CASCADE,
+    UNIQUE (container_id, key)
+);
+CREATE TABLE containers_devices (
+    id INTEGER primary key AUTOINCREMENT NOT NULL,
+    container_id INTEGER NOT NULL,
+    name TEXT NOT NULL,
+    type INTEGER NOT NULL default 0,
+    FOREIGN KEY (container_id) REFERENCES containers (id) ON DELETE CASCADE,
+    UNIQUE (container_id, name)
+);
+CREATE TABLE containers_devices_config (
+    id INTEGER primary key AUTOINCREMENT NOT NULL,
+    container_device_id INTEGER NOT NULL,
+    key TEXT NOT NULL,
+    value TEXT,
+    FOREIGN KEY (container_device_id) REFERENCES containers_devices (id) ON DELETE CASCADE,
+    UNIQUE (container_device_id, key)
+);
+CREATE TABLE containers_profiles (
+    id INTEGER primary key AUTOINCREMENT NOT NULL,
+    container_id INTEGER NOT NULL,
+    profile_id INTEGER NOT NULL,
+    apply_order INTEGER NOT NULL default 0,
+    UNIQUE (container_id, profile_id),
+    FOREIGN KEY (container_id) REFERENCES containers(id) ON DELETE CASCADE,
+    FOREIGN KEY (profile_id) REFERENCES profiles(id) ON DELETE CASCADE
+);
+CREATE TABLE images (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    fingerprint TEXT NOT NULL,
+    filename TEXT NOT NULL,
+    size INTEGER NOT NULL,
+    public INTEGER NOT NULL DEFAULT 0,
+    architecture INTEGER NOT NULL,
+    creation_date DATETIME,
+    expiry_date DATETIME,
+    upload_date DATETIME NOT NULL,
+    cached INTEGER NOT NULL DEFAULT 0,
+    last_use_date DATETIME,
+    auto_update INTEGER NOT NULL DEFAULT 0,
+    UNIQUE (fingerprint)
+);
+CREATE TABLE "images_aliases" (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    name TEXT NOT NULL,
+    image_id INTEGER NOT NULL,
+    description TEXT,
+    FOREIGN KEY (image_id) REFERENCES images (id) ON DELETE CASCADE,
+    UNIQUE (name)
+);
+CREATE TABLE images_properties (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    image_id INTEGER NOT NULL,
+    type INTEGER NOT NULL,
+    key TEXT NOT NULL,
+    value TEXT,
+    FOREIGN KEY (image_id) REFERENCES images (id) ON DELETE CASCADE
+);
+CREATE TABLE images_source (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    image_id INTEGER NOT NULL,
+    server TEXT NOT NULL,
+    protocol INTEGER NOT NULL,
+    certificate TEXT NOT NULL,
+    alias TEXT NOT NULL,
+    FOREIGN KEY (image_id) REFERENCES images (id) ON DELETE CASCADE
+);
+CREATE TABLE images_nodes (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    image_id INTEGER NOT NULL,
+    node_id INTEGER NOT NULL,
+    UNIQUE (image_id, node_id),
+    FOREIGN KEY (image_id) REFERENCES images (id) ON DELETE CASCADE,
+    FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
+);
 CREATE TABLE networks (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
     name TEXT NOT NULL,
@@ -69,6 +164,36 @@ CREATE TABLE networks_config (
     FOREIGN KEY (network_id) REFERENCES networks (id) ON DELETE CASCADE,
     FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
 );
+CREATE TABLE profiles (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    name TEXT NOT NULL,
+    description TEXT,
+    UNIQUE (name)
+);
+CREATE TABLE profiles_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    profile_id INTEGER NOT NULL,
+    key TEXT NOT NULL,
+    value TEXT,
+    UNIQUE (profile_id, key),
+    FOREIGN KEY (profile_id) REFERENCES profiles(id) ON DELETE CASCADE
+);
+CREATE TABLE profiles_devices (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    profile_id INTEGER NOT NULL,
+    name TEXT NOT NULL,
+    type INTEGER NOT NULL default 0,
+    UNIQUE (profile_id, name),
+    FOREIGN KEY (profile_id) REFERENCES profiles (id) ON DELETE CASCADE
+);
+CREATE TABLE profiles_devices_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    profile_device_id INTEGER NOT NULL,
+    key TEXT NOT NULL,
+    value TEXT,
+    UNIQUE (profile_device_id, key),
+    FOREIGN KEY (profile_device_id) REFERENCES profiles_devices (id) ON DELETE CASCADE
+);
 CREATE TABLE storage_pools (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
     name TEXT NOT NULL,
diff --git a/lxd/db/cluster/update_test.go b/lxd/db/cluster/update_test.go
index bd914db33..f30a0d456 100644
--- a/lxd/db/cluster/update_test.go
+++ b/lxd/db/cluster/update_test.go
@@ -57,6 +57,29 @@ func TestUpdateFromV1_Config(t *testing.T) {
 	require.Error(t, err)
 }
 
+func TestUpdateFromV1_Containers(t *testing.T) {
+	schema := cluster.Schema()
+	db, err := schema.ExerciseUpdate(2, nil)
+	require.NoError(t, err)
+
+	_, err = db.Exec("INSERT INTO nodes VALUES (1, 'one', '', '1.1.1.1', 666, 999, ?)", time.Now())
+	require.NoError(t, err)
+
+	_, err = db.Exec("INSERT INTO nodes VALUES (2, 'two', '', '2.2.2.2', 666, 999, ?)", time.Now())
+	require.NoError(t, err)
+
+	_, err = db.Exec(`
+INSERT INTO containers VALUES (1, 1, 'bionic', 1, 1, 0, ?, 0, ?, 'Bionic Beaver')
+`, time.Now(), time.Now())
+	require.NoError(t, err)
+
+	// Unique constraint on name
+	_, err = db.Exec(`
+INSERT INTO containers VALUES (2, 2, 'bionic', 2, 2, 1, ?, 1, ?, 'Ubuntu LTS')
+`, time.Now(), time.Now())
+	require.Error(t, err)
+}
+
 func TestUpdateFromV1_Network(t *testing.T) {
 	schema := cluster.Schema()
 	db, err := schema.ExerciseUpdate(2, nil)

From d228ab67487a4437a6429f0a1bc3dcab758dff1b Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 26 Oct 2017 13:35:26 +0000
Subject: [PATCH 063/227] Move containers, images and profiles tables to the
 cluster db

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_internal.go            |   8 +-
 lxd/container.go               |  34 ++++----
 lxd/container_lxc.go           |  50 ++++++------
 lxd/container_post.go          |   2 +-
 lxd/container_snapshot.go      |   4 +-
 lxd/container_test.go          |   4 +-
 lxd/containers.go              |   8 +-
 lxd/containers_get.go          |   2 +-
 lxd/containers_post.go         |  14 ++--
 lxd/daemon.go                  |  28 +++++--
 lxd/daemon_images.go           |  14 ++--
 lxd/daemon_images_test.go      |   6 +-
 lxd/db/cluster/open.go         |  10 ++-
 lxd/db/cluster/update_test.go  |  12 +++
 lxd/db/containers.go           |  82 +++++++++----------
 lxd/db/db.go                   |  18 ++++-
 lxd/db/db_internal_test.go     | 176 +++--------------------------------------
 lxd/db/devices.go              |   6 +-
 lxd/db/images.go               | 112 +++++++++++++-------------
 lxd/db/migration.go            |  41 ++++++++++
 lxd/db/migration_test.go       |   1 +
 lxd/db/node/schema.go          |  91 ---------------------
 lxd/db/node/update.go          |  10 +++
 lxd/db/profiles.go             |  46 +++++------
 lxd/devices.go                 |   6 +-
 lxd/devlxd.go                  |   2 +-
 lxd/images.go                  | 110 +++++++++++++-------------
 lxd/logging.go                 |   2 +-
 lxd/main_activateifneeded.go   |  44 ++++++++++-
 lxd/main_sql.go                |   2 +-
 lxd/main_test.go               |   4 +-
 lxd/networks.go                |   4 +-
 lxd/networks_utils.go          |   2 +-
 lxd/patches.go                 |  42 +++++-----
 lxd/profiles.go                |  22 +++---
 lxd/profiles_test.go           |  34 +++-----
 lxd/profiles_utils.go          |   4 +-
 lxd/storage_lvm_utils.go       |   4 +-
 lxd/storage_pools.go           |   2 +-
 lxd/storage_pools_utils.go     |   4 +-
 lxd/storage_volumes_utils.go   |   6 +-
 test/includes/lxd.sh           |  29 ++++---
 test/suites/backup.sh          |  18 ++---
 test/suites/database_update.sh |   4 +-
 test/suites/image.sh           |   2 +-
 test/suites/profiling.sh       |   3 +-
 46 files changed, 505 insertions(+), 624 deletions(-)

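[Editor's note] The bulk of this patch is a mechanical repointing of call sites from the node-local database handle to the cluster one (d.db -> d.cluster, s.Node -> s.Cluster, c.db -> c.state.Cluster). A self-contained sketch of the resulting split, with hypothetical stub types standing in for db.Node and db.Cluster:

package main

import "fmt"

// Stubs standing in for db.Node and db.Cluster, just to show the call shape.
type Node struct{}    // node-local data (e.g. raft_nodes)
type Cluster struct{} // cluster-wide data (containers, images, profiles)

// ContainerId mirrors the real Cluster method moved by this patch.
func (c *Cluster) ContainerId(name string) (int, error) {
	if name == "bionic" {
		return 1, nil
	}
	return -1, fmt.Errorf("no such container: %s", name)
}

// Daemon keeps both handles; container lookups now go through the cluster.
type Daemon struct {
	db      *Node
	cluster *Cluster
}

func main() {
	d := &Daemon{db: &Node{}, cluster: &Cluster{}}
	// Before this patch: d.db.ContainerId(name); after:
	id, _ := d.cluster.ContainerId("bionic")
	fmt.Println("container id:", id)
}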
diff --git a/lxd/api_internal.go b/lxd/api_internal.go
index 07d8e9ec3..0c2ca82d6 100644
--- a/lxd/api_internal.go
+++ b/lxd/api_internal.go
@@ -589,7 +589,7 @@ func internalImport(d *Daemon, r *http.Request) Response {
 	}
 
 	// Check if an entry for the container already exists in the db.
-	_, containerErr := d.db.ContainerId(req.Name)
+	_, containerErr := d.cluster.ContainerId(req.Name)
 	if containerErr != nil {
 		if containerErr != sql.ErrNoRows {
 			return SmartError(containerErr)
@@ -634,7 +634,7 @@ func internalImport(d *Daemon, r *http.Request) Response {
 	if containerErr == nil {
 		// Remove the storage volume db entry for the container since
 		// force was specified.
-		err := d.db.ContainerRemove(req.Name)
+		err := d.cluster.ContainerRemove(req.Name)
 		if err != nil {
 			return SmartError(err)
 		}
@@ -642,7 +642,7 @@ func internalImport(d *Daemon, r *http.Request) Response {
 
 	for _, snap := range existingSnapshots {
 		// Check if an entry for the snapshot already exists in the db.
-		_, snapErr := d.db.ContainerId(snap.Name)
+		_, snapErr := d.cluster.ContainerId(snap.Name)
 		if snapErr != nil {
 			if snapErr != sql.ErrNoRows {
 				return SmartError(snapErr)
@@ -673,7 +673,7 @@ func internalImport(d *Daemon, r *http.Request) Response {
 		}
 
 		if snapErr == nil {
-			err := d.db.ContainerRemove(snap.Name)
+			err := d.cluster.ContainerRemove(snap.Name)
 			if err != nil {
 				return SmartError(err)
 			}
diff --git a/lxd/container.go b/lxd/container.go
index ca64aa952..c561c2d16 100644
--- a/lxd/container.go
+++ b/lxd/container.go
@@ -572,7 +572,7 @@ func containerCreateAsEmpty(d *Daemon, args db.ContainerArgs) (container, error)
 	// Now create the empty storage
 	err = c.Storage().ContainerCreate(c)
 	if err != nil {
-		d.db.ContainerRemove(args.Name)
+		d.cluster.ContainerRemove(args.Name)
 		return nil, err
 	}
 
@@ -596,7 +596,7 @@ func containerCreateEmptySnapshot(s *state.State, args db.ContainerArgs) (contai
 	// Now create the empty snapshot
 	err = c.Storage().ContainerSnapshotCreateEmpty(c)
 	if err != nil {
-		s.Node.ContainerRemove(args.Name)
+		s.Cluster.ContainerRemove(args.Name)
 		return nil, err
 	}
 
@@ -605,7 +605,7 @@ func containerCreateEmptySnapshot(s *state.State, args db.ContainerArgs) (contai
 
 func containerCreateFromImage(s *state.State, args db.ContainerArgs, hash string) (container, error) {
 	// Get the image properties
-	_, img, err := s.Node.ImageGet(hash, false, false)
+	_, img, err := s.Cluster.ImageGet(hash, false, false)
 	if err != nil {
 		return nil, err
 	}
@@ -626,16 +626,16 @@ func containerCreateFromImage(s *state.State, args db.ContainerArgs, hash string
 		return nil, err
 	}
 
-	err = s.Node.ImageLastAccessUpdate(hash, time.Now().UTC())
+	err = s.Cluster.ImageLastAccessUpdate(hash, time.Now().UTC())
 	if err != nil {
-		s.Node.ContainerRemove(args.Name)
+		s.Cluster.ContainerRemove(args.Name)
 		return nil, fmt.Errorf("Error updating image last use date: %s", err)
 	}
 
 	// Now create the storage from an image
 	err = c.Storage().ContainerCreateFromImage(c, hash)
 	if err != nil {
-		s.Node.ContainerRemove(args.Name)
+		s.Cluster.ContainerRemove(args.Name)
 		return nil, err
 	}
 
@@ -660,7 +660,7 @@ func containerCreateAsCopy(s *state.State, args db.ContainerArgs, sourceContaine
 	if !containerOnly {
 		snapshots, err := sourceContainer.Snapshots()
 		if err != nil {
-			s.Node.ContainerRemove(args.Name)
+			s.Cluster.ContainerRemove(args.Name)
 			return nil, err
 		}
 
@@ -692,9 +692,9 @@ func containerCreateAsCopy(s *state.State, args db.ContainerArgs, sourceContaine
 	err = ct.Storage().ContainerCopy(ct, sourceContainer, containerOnly)
 	if err != nil {
 		for _, v := range csList {
-			s.Node.ContainerRemove((*v).Name())
+			s.Cluster.ContainerRemove((*v).Name())
 		}
-		s.Node.ContainerRemove(args.Name)
+		s.Cluster.ContainerRemove(args.Name)
 		return nil, err
 	}
 
@@ -773,7 +773,7 @@ func containerCreateAsSnapshot(s *state.State, args db.ContainerArgs, sourceCont
 	// Clone the container
 	err = sourceContainer.Storage().ContainerSnapshotCreate(c, sourceContainer)
 	if err != nil {
-		s.Node.ContainerRemove(args.Name)
+		s.Cluster.ContainerRemove(args.Name)
 		return nil, err
 	}
 
@@ -852,7 +852,7 @@ func containerCreateInternal(s *state.State, args db.ContainerArgs) (container,
 	}
 
 	// Validate profiles
-	profiles, err := s.Node.Profiles()
+	profiles, err := s.Cluster.Profiles()
 	if err != nil {
 		return nil, err
 	}
@@ -864,7 +864,7 @@ func containerCreateInternal(s *state.State, args db.ContainerArgs) (container,
 	}
 
 	// Create the container entry
-	id, err := s.Node.ContainerCreate(args)
+	id, err := s.Cluster.ContainerCreate(args)
 	if err != nil {
 		if err == db.DbErrAlreadyDefined {
 			thing := "Container"
@@ -882,9 +882,9 @@ func containerCreateInternal(s *state.State, args db.ContainerArgs) (container,
 	args.Id = id
 
 	// Read the timestamp from the database
-	dbArgs, err := s.Node.ContainerGet(args.Name)
+	dbArgs, err := s.Cluster.ContainerGet(args.Name)
 	if err != nil {
-		s.Node.ContainerRemove(args.Name)
+		s.Cluster.ContainerRemove(args.Name)
 		return nil, err
 	}
 	args.CreationDate = dbArgs.CreationDate
@@ -893,7 +893,7 @@ func containerCreateInternal(s *state.State, args db.ContainerArgs) (container,
 	// Setup the container struct and finish creation (storage and idmap)
 	c, err := containerLXCCreate(s, args)
 	if err != nil {
-		s.Node.ContainerRemove(args.Name)
+		s.Cluster.ContainerRemove(args.Name)
 		return nil, err
 	}
 
@@ -948,7 +948,7 @@ func containerConfigureInternal(c container) error {
 
 func containerLoadById(s *state.State, id int) (container, error) {
 	// Get the DB record
-	name, err := s.Node.ContainerName(id)
+	name, err := s.Cluster.ContainerName(id)
 	if err != nil {
 		return nil, err
 	}
@@ -958,7 +958,7 @@ func containerLoadById(s *state.State, id int) (container, error) {
 
 func containerLoadByName(s *state.State, name string) (container, error) {
 	// Get the DB record
-	args, err := s.Node.ContainerGet(name)
+	args, err := s.Cluster.ContainerGet(name)
 	if err != nil {
 		return nil, err
 	}
diff --git a/lxd/container_lxc.go b/lxd/container_lxc.go
index f70b60efa..91a42faab 100644
--- a/lxd/container_lxc.go
+++ b/lxd/container_lxc.go
@@ -271,7 +271,6 @@ func containerLXCCreate(s *state.State, args db.ContainerArgs) (container, error
 	// Create the container struct
 	c := &containerLXC{
 		state:        s,
-		db:           s.Node,
 		id:           args.Id,
 		name:         args.Name,
 		description:  args.Description,
@@ -447,7 +446,6 @@ func containerLXCLoad(s *state.State, args db.ContainerArgs) (container, error)
 	// Create the container struct
 	c := &containerLXC{
 		state:        s,
-		db:           s.Node,
 		id:           args.Id,
 		name:         args.Name,
 		description:  args.Description,
@@ -493,9 +491,9 @@ type containerLXC struct {
 	profiles        []string
 
 	// Cache
-	c        *lxc.Container
-	cConfig  bool
-	db       *db.Node
+	c       *lxc.Container
+	cConfig bool
+
 	state    *state.State
 	idmapset *idmap.IdmapSet
 
@@ -733,7 +731,7 @@ func findIdmap(state *state.State, cName string, isolatedStr string, configBase
 	idmapLock.Lock()
 	defer idmapLock.Unlock()
 
-	cs, err := state.Node.ContainersList(db.CTypeRegular)
+	cs, err := state.Cluster.ContainersList(db.CTypeRegular)
 	if err != nil {
 		return nil, 0, err
 	}
@@ -1623,7 +1621,7 @@ func (c *containerLXC) expandConfig() error {
 
 	// Apply all the profiles
 	for _, name := range c.profiles {
-		profileConfig, err := c.db.ProfileConfig(name)
+		profileConfig, err := c.state.Cluster.ProfileConfig(name)
 		if err != nil {
 			return err
 		}
@@ -1647,7 +1645,7 @@ func (c *containerLXC) expandDevices() error {
 
 	// Apply all the profiles
 	for _, p := range c.profiles {
-		profileDevices, err := c.db.Devices(p, true)
+		profileDevices, err := c.state.Cluster.Devices(p, true)
 		if err != nil {
 			return err
 		}
@@ -1775,7 +1773,7 @@ func (c *containerLXC) startCommon() (string, error) {
 		}
 
 		// Remove the volatile key from the DB
-		err = c.db.ContainerConfigRemove(c.id, "volatile.apply_quota")
+		err = c.state.Cluster.ContainerConfigRemove(c.id, "volatile.apply_quota")
 		if err != nil {
 			return "", err
 		}
@@ -2199,7 +2197,7 @@ func (c *containerLXC) startCommon() (string, error) {
 	}
 
 	// Update time container was last started
-	err = c.db.ContainerLastUsedUpdate(c.id, time.Now().UTC())
+	err = c.state.Cluster.ContainerLastUsedUpdate(c.id, time.Now().UTC())
 	if err != nil {
 		return "", fmt.Errorf("Error updating last used: %v", err)
 	}
@@ -2267,7 +2265,7 @@ func (c *containerLXC) Start(stateful bool) error {
 		os.RemoveAll(c.StatePath())
 		c.stateful = false
 
-		err = c.db.ContainerSetStateful(c.id, false)
+		err = c.state.Cluster.ContainerSetStateful(c.id, false)
 		if err != nil {
 			logger.Error("Failed starting container", ctxMap)
 			return err
@@ -2284,7 +2282,7 @@ func (c *containerLXC) Start(stateful bool) error {
 		}
 
 		c.stateful = false
-		err = c.db.ContainerSetStateful(c.id, false)
+		err = c.state.Cluster.ContainerSetStateful(c.id, false)
 		if err != nil {
 			return err
 		}
@@ -2379,7 +2377,7 @@ func (c *containerLXC) OnStart() error {
 		}
 
 		// Remove the volatile key from the DB
-		err := c.db.ContainerConfigRemove(c.id, key)
+		err := c.state.Cluster.ContainerConfigRemove(c.id, key)
 		if err != nil {
 			AADestroy(c)
 			if ourStart {
@@ -2433,7 +2431,7 @@ func (c *containerLXC) OnStart() error {
 	}
 
 	// Record current state
-	err = c.db.ContainerSetState(c.id, "RUNNING")
+	err = c.state.Cluster.ContainerSetState(c.id, "RUNNING")
 	if err != nil {
 		return err
 	}
@@ -2497,7 +2495,7 @@ func (c *containerLXC) Stop(stateful bool) error {
 		}
 
 		c.stateful = true
-		err = c.db.ContainerSetStateful(c.id, true)
+		err = c.state.Cluster.ContainerSetStateful(c.id, true)
 		if err != nil {
 			op.Done(err)
 			logger.Error("Failed stopping container", ctxMap)
@@ -2687,7 +2685,7 @@ func (c *containerLXC) OnStop(target string) error {
 		deviceTaskSchedulerTrigger("container", c.name, "stopped")
 
 		// Record current state
-		err = c.db.ContainerSetState(c.id, "STOPPED")
+		err = c.state.Cluster.ContainerSetState(c.id, "STOPPED")
 		if err != nil {
 			logger.Error("Failed to set container state", log.Ctx{"container": c.Name(), "err": err})
 		}
@@ -2881,7 +2879,7 @@ func (c *containerLXC) RenderState() (*api.ContainerState, error) {
 
 func (c *containerLXC) Snapshots() ([]container, error) {
 	// Get all the snapshots
-	snaps, err := c.db.ContainerGetSnapshots(c.name)
+	snaps, err := c.state.Cluster.ContainerGetSnapshots(c.name)
 	if err != nil {
 		return nil, err
 	}
@@ -3119,7 +3117,7 @@ func (c *containerLXC) Delete() error {
 	}
 
 	// Remove the database record
-	if err := c.db.ContainerRemove(c.Name()); err != nil {
+	if err := c.state.Cluster.ContainerRemove(c.Name()); err != nil {
 		logger.Error("Failed deleting container entry", log.Ctx{"name": c.Name(), "err": err})
 		return err
 	}
@@ -3218,7 +3216,7 @@ func (c *containerLXC) Rename(newName string) error {
 	}
 
 	// Rename the database entry
-	err = c.db.ContainerRename(oldName, newName)
+	err = c.state.Cluster.ContainerRename(oldName, newName)
 	if err != nil {
 		logger.Error("Failed renaming container", ctxMap)
 		return err
@@ -3234,7 +3232,7 @@ func (c *containerLXC) Rename(newName string) error {
 
 	if !c.IsSnapshot() {
 		// Rename all the snapshots
-		results, err := c.db.ContainerGetSnapshots(oldName)
+		results, err := c.state.Cluster.ContainerGetSnapshots(oldName)
 		if err != nil {
 			logger.Error("Failed renaming container", ctxMap)
 			return err
@@ -3244,7 +3242,7 @@ func (c *containerLXC) Rename(newName string) error {
 			// Rename the snapshot
 			baseSnapName := filepath.Base(sname)
 			newSnapshotName := newName + shared.SnapshotDelimiter + baseSnapName
-			err := c.db.ContainerRename(sname, newSnapshotName)
+			err := c.state.Cluster.ContainerRename(sname, newSnapshotName)
 			if err != nil {
 				logger.Error("Failed renaming container", ctxMap)
 				return err
@@ -3450,7 +3448,7 @@ func (c *containerLXC) Update(args db.ContainerArgs, userRequested bool) error {
 	}
 
 	// Validate the new profiles
-	profiles, err := c.db.Profiles()
+	profiles, err := c.state.Cluster.Profiles()
 	if err != nil {
 		return err
 	}
@@ -4375,7 +4373,7 @@ func (c *containerLXC) Update(args db.ContainerArgs, userRequested bool) error {
 	}
 
 	// Finally, apply the changes to the database
-	tx, err := c.db.Begin()
+	tx, err := c.state.Cluster.Begin()
 	if err != nil {
 		return err
 	}
@@ -6949,7 +6947,7 @@ func (c *containerLXC) fillNetworkDevice(name string, m types.Device) (types.Dev
 	}
 
 	updateKey := func(key string, value string) error {
-		tx, err := c.db.Begin()
+		tx, err := c.state.Cluster.Begin()
 		if err != nil {
 			return err
 		}
@@ -6983,7 +6981,7 @@ func (c *containerLXC) fillNetworkDevice(name string, m types.Device) (types.Dev
 			err = updateKey(configKey, volatileHwaddr)
 			if err != nil {
 				// Check if something else filled it in behind our back
-				value, err1 := c.db.ContainerConfigGet(c.id, configKey)
+				value, err1 := c.state.Cluster.ContainerConfigGet(c.id, configKey)
 				if err1 != nil || value == "" {
 					return nil, err
 				}
@@ -7013,7 +7011,7 @@ func (c *containerLXC) fillNetworkDevice(name string, m types.Device) (types.Dev
 			err = updateKey(configKey, volatileName)
 			if err != nil {
 				// Check if something else filled it in behind our back
-				value, err1 := c.db.ContainerConfigGet(c.id, configKey)
+				value, err1 := c.state.Cluster.ContainerConfigGet(c.id, configKey)
 				if err1 != nil || value == "" {
 					return nil, err
 				}
diff --git a/lxd/container_post.go b/lxd/container_post.go
index fa32e1c70..25e1fd04b 100644
--- a/lxd/container_post.go
+++ b/lxd/container_post.go
@@ -80,7 +80,7 @@ func containerPost(d *Daemon, r *http.Request) Response {
 	}
 
 	// Check that the name isn't already in use
-	id, _ := d.db.ContainerId(req.Name)
+	id, _ := d.cluster.ContainerId(req.Name)
 	if id > 0 {
 		return Conflict
 	}
diff --git a/lxd/container_snapshot.go b/lxd/container_snapshot.go
index 07ea52681..d2c711986 100644
--- a/lxd/container_snapshot.go
+++ b/lxd/container_snapshot.go
@@ -89,7 +89,7 @@ func containerSnapshotsPost(d *Daemon, r *http.Request) Response {
 
 	if req.Name == "" {
 		// come up with a name
-		i := d.db.ContainerNextSnapshot(name)
+		i := d.cluster.ContainerNextSnapshot(name)
 		req.Name = fmt.Sprintf("snap%d", i)
 	}
 
@@ -258,7 +258,7 @@ func snapshotPost(d *Daemon, r *http.Request, sc container, containerName string
 	fullName := containerName + shared.SnapshotDelimiter + newName
 
 	// Check that the name isn't already in use
-	id, _ := d.db.ContainerId(fullName)
+	id, _ := d.cluster.ContainerId(fullName)
 	if id > 0 {
 		return Conflict
 	}
diff --git a/lxd/container_test.go b/lxd/container_test.go
index 9a0676159..2c546116e 100644
--- a/lxd/container_test.go
+++ b/lxd/container_test.go
@@ -41,7 +41,7 @@ func (suite *containerTestSuite) TestContainer_ProfilesDefault() {
 
 func (suite *containerTestSuite) TestContainer_ProfilesMulti() {
 	// Create an unprivileged profile
-	_, err := suite.d.db.ProfileCreate(
+	_, err := suite.d.cluster.ProfileCreate(
 		"unprivileged",
 		"unprivileged",
 		map[string]string{"security.privileged": "true"},
@@ -49,7 +49,7 @@ func (suite *containerTestSuite) TestContainer_ProfilesMulti() {
 
 	suite.Req.Nil(err, "Failed to create the unprivileged profile.")
 	defer func() {
-		suite.d.db.ProfileDelete("unprivileged")
+		suite.d.cluster.ProfileDelete("unprivileged")
 	}()
 
 	args := db.ContainerArgs{
diff --git a/lxd/containers.go b/lxd/containers.go
index 2b884f270..e28357adc 100644
--- a/lxd/containers.go
+++ b/lxd/containers.go
@@ -106,7 +106,7 @@ func (slice containerAutostartList) Swap(i, j int) {
 
 func containersRestart(s *state.State) error {
 	// Get all the containers
-	result, err := s.Node.ContainersList(db.CTypeRegular)
+	result, err := s.Cluster.ContainersList(db.CTypeRegular)
 	if err != nil {
 		return err
 	}
@@ -179,7 +179,7 @@ func containersShutdown(s *state.State) error {
 	var wg sync.WaitGroup
 
 	// Get all the containers
-	results, err := s.Node.ContainersList(db.CTypeRegular)
+	results, err := s.Cluster.ContainersList(db.CTypeRegular)
 	if err != nil {
 		return err
 	}
@@ -198,7 +198,7 @@ func containersShutdown(s *state.State) error {
 	sort.Sort(containerStopList(containers))
 
 	// Reset all container states
-	err = s.Node.ContainersResetState()
+	err = s.Cluster.ContainersResetState()
 	if err != nil {
 		return err
 	}
@@ -256,7 +256,7 @@ func containerDeleteSnapshots(s *state.State, cname string) error {
 	logger.Debug("containerDeleteSnapshots",
 		log.Ctx{"container": cname})
 
-	results, err := s.Node.ContainerGetSnapshots(cname)
+	results, err := s.Cluster.ContainerGetSnapshots(cname)
 	if err != nil {
 		return err
 	}
diff --git a/lxd/containers_get.go b/lxd/containers_get.go
index 9ae37928b..29ac485ac 100644
--- a/lxd/containers_get.go
+++ b/lxd/containers_get.go
@@ -34,7 +34,7 @@ func containersGet(d *Daemon, r *http.Request) Response {
 }
 
 func doContainersGet(s *state.State, recursion bool) (interface{}, error) {
-	result, err := s.Node.ContainersList(db.CTypeRegular)
+	result, err := s.Cluster.ContainersList(db.CTypeRegular)
 	if err != nil {
 		return nil, err
 	}
diff --git a/lxd/containers_post.go b/lxd/containers_post.go
index 60e120453..666a3fbc2 100644
--- a/lxd/containers_post.go
+++ b/lxd/containers_post.go
@@ -32,7 +32,7 @@ func createFromImage(d *Daemon, req *api.ContainersPost) Response {
 		if req.Source.Server != "" {
 			hash = req.Source.Alias
 		} else {
-			_, alias, err := d.db.ImageAliasGet(req.Source.Alias, true)
+			_, alias, err := d.cluster.ImageAliasGet(req.Source.Alias, true)
 			if err != nil {
 				return SmartError(err)
 			}
@@ -44,7 +44,7 @@ func createFromImage(d *Daemon, req *api.ContainersPost) Response {
 			return BadRequest(fmt.Errorf("Property match is only supported for local images"))
 		}
 
-		hashes, err := d.db.ImagesGet(false)
+		hashes, err := d.cluster.ImagesGet(false)
 		if err != nil {
 			return SmartError(err)
 		}
@@ -52,7 +52,7 @@ func createFromImage(d *Daemon, req *api.ContainersPost) Response {
 		var image *api.Image
 
 		for _, imageHash := range hashes {
-			_, img, err := d.db.ImageGet(imageHash, false, true)
+			_, img, err := d.cluster.ImageGet(imageHash, false, true)
 			if err != nil {
 				continue
 			}
@@ -108,7 +108,7 @@ func createFromImage(d *Daemon, req *api.ContainersPost) Response {
 				return err
 			}
 		} else {
-			_, info, err = d.db.ImageGet(hash, false, false)
+			_, info, err = d.cluster.ImageGet(hash, false, false)
 			if err != nil {
 				return err
 			}
@@ -218,7 +218,7 @@ func createFromMigration(d *Daemon, req *api.ContainersPost) Response {
 	// If we don't have a valid pool yet, look through profiles
 	if storagePool == "" {
 		for _, pName := range req.Profiles {
-			_, p, err := d.db.ProfileGet(pName)
+			_, p, err := d.cluster.ProfileGet(pName)
 			if err != nil {
 				return SmartError(err)
 			}
@@ -292,7 +292,7 @@ func createFromMigration(d *Daemon, req *api.ContainersPost) Response {
 	 * point and just negotiate it over the migration control
 	 * socket. Anyway, it'll happen later :)
 	 */
-	_, _, err = d.db.ImageGet(req.Source.BaseImage, false, true)
+	_, _, err = d.cluster.ImageGet(req.Source.BaseImage, false, true)
 	if err != nil {
 		c, err = containerCreateAsEmpty(d, args)
 		if err != nil {
@@ -530,7 +530,7 @@ func containersPost(d *Daemon, r *http.Request) Response {
 	}
 
 	if req.Name == "" {
-		cs, err := d.db.ContainersList(db.CTypeRegular)
+		cs, err := d.cluster.ContainersList(db.CTypeRegular)
 		if err != nil {
 			return SmartError(err)
 		}
diff --git a/lxd/daemon.go b/lxd/daemon.go
index 15594c733..962b42417 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -550,7 +550,7 @@ func (d *Daemon) Ready() error {
 }
 
 func (d *Daemon) numRunningContainers() (int, error) {
-	results, err := d.db.ContainersList(db.CTypeRegular)
+	results, err := d.cluster.ContainersList(db.CTypeRegular)
 	if err != nil {
 		return 0, err
 	}
@@ -587,10 +587,21 @@ func (d *Daemon) Stop() error {
 
 	shouldUnmount := false
 	if d.db != nil {
-		if n, err := d.numRunningContainers(); err != nil || n == 0 {
+		// It might be that the database nodes are all down; in that
+		// case we don't want to wait too long.
+		//
+		// FIXME: it should be possible to provide a context or a
+		//        timeout for database queries.
+		ch := make(chan bool)
+		go func() {
+			n, err := d.numRunningContainers()
+			ch <- err != nil || n == 0
+		}()
+		select {
+		case shouldUnmount = <-ch:
+		case <-time.After(2 * time.Second):
 			shouldUnmount = true
 		}
-
 		logger.Infof("Closing the database")
 		trackError(d.db.Close())
 	}
@@ -723,10 +734,15 @@ func initializeDbObject(d *Daemon) (*db.Dump, error) {
 	for i, patch := range legacyPatches {
 		legacy[i] = &db.LegacyPatch{
 			Hook: func(node *sql.DB) error {
-				// FIXME: Attach the local db to the Daemon, since at
-				//        this stage we're not fully initialized, yet
-				//        some legacy patches expect to find it here.
+				// FIXME: Use the low-level *node* SQL db as backend for both the
+				//        db.Node and db.Cluster objects, since at this point we
+				//        haven't migrated the data to the cluster database yet.
+				cluster := d.cluster
+				defer func() {
+					d.cluster = cluster
+				}()
 				d.db = db.ForLegacyPatches(node)
+				d.cluster = db.ForLocalInspection(node)
 				return patch(d)
 			},
 		}
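[Editor's note] The Stop() hunk above wraps a potentially hanging cluster query in a two-second timeout so shutdown can't block when all database nodes are unreachable. The pattern in isolation, as a runnable sketch; countRunning is a hypothetical stand-in for numRunningContainers, and the sketch buffers the channel so the goroutine can still exit if the timeout fires first:

package main

import (
	"fmt"
	"time"
)

// shouldUnmount runs a possibly-hanging query in a goroutine and falls back
// to true if no answer arrives within two seconds.
func shouldUnmount(countRunning func() (int, error)) bool {
	ch := make(chan bool, 1) // buffered: the send succeeds even after a timeout
	go func() {
		n, err := countRunning()
		ch <- err != nil || n == 0
	}()
	select {
	case v := <-ch:
		return v
	case <-time.After(2 * time.Second):
		// All database nodes may be down; don't block shutdown forever.
		return true
	}
}

func main() {
	// A stand-in query that never answers, like a cluster with no leader.
	hung := func() (int, error) { select {} }
	fmt.Println(shouldUnmount(hung)) // prints "true" after ~2s
}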
diff --git a/lxd/daemon_images.go b/lxd/daemon_images.go
index 853c8b1fc..f55edbcca 100644
--- a/lxd/daemon_images.go
+++ b/lxd/daemon_images.go
@@ -237,14 +237,14 @@ func (d *Daemon) ImageDownload(op *operation, server string, protocol string, ce
 		return nil, err
 	}
 	if preferCached && interval > 0 && alias != fp {
-		cachedFingerprint, err := d.db.ImageSourceGetCachedFingerprint(server, protocol, alias)
+		cachedFingerprint, err := d.cluster.ImageSourceGetCachedFingerprint(server, protocol, alias)
 		if err == nil && cachedFingerprint != fp {
 			fp = cachedFingerprint
 		}
 	}
 
 	// Check if the image already exists (partial hash match)
-	_, imgInfo, err := d.db.ImageGet(fp, false, true)
+	_, imgInfo, err := d.cluster.ImageGet(fp, false, true)
 	if err == nil {
 		logger.Debug("Image already exists in the db", log.Ctx{"image": fp})
 		info = imgInfo
@@ -298,7 +298,7 @@ func (d *Daemon) ImageDownload(op *operation, server string, protocol string, ce
 		<-waitChannel
 
 		// Grab the database entry
-		_, imgInfo, err := d.db.ImageGet(fp, false, true)
+		_, imgInfo, err := d.cluster.ImageGet(fp, false, true)
 		if err != nil {
 			// Other download failed, lets try again
 			logger.Error("Other image download didn't succeed", log.Ctx{"image": fp})
@@ -519,7 +519,7 @@ func (d *Daemon) ImageDownload(op *operation, server string, protocol string, ce
 	}
 
 	// Create the database entry
-	err = d.db.ImageInsert(info.Fingerprint, info.Filename, info.Size, info.Public, info.AutoUpdate, info.Architecture, info.CreatedAt, info.ExpiresAt, info.Properties)
+	err = d.cluster.ImageInsert(info.Fingerprint, info.Filename, info.Size, info.Public, info.AutoUpdate, info.Architecture, info.CreatedAt, info.ExpiresAt, info.Properties)
 	if err != nil {
 		return nil, err
 	}
@@ -545,12 +545,12 @@ func (d *Daemon) ImageDownload(op *operation, server string, protocol string, ce
 
 	// Record the image source
 	if alias != fp {
-		id, _, err := d.db.ImageGet(fp, false, true)
+		id, _, err := d.cluster.ImageGet(fp, false, true)
 		if err != nil {
 			return nil, err
 		}
 
-		err = d.db.ImageSourceInsert(id, server, protocol, certificate, alias)
+		err = d.cluster.ImageSourceInsert(id, server, protocol, certificate, alias)
 		if err != nil {
 			return nil, err
 		}
@@ -566,7 +566,7 @@ func (d *Daemon) ImageDownload(op *operation, server string, protocol string, ce
 
 	// Mark the image as "cached" if downloading for a container
 	if forContainer {
-		err := d.db.ImageLastAccessInit(fp)
+		err := d.cluster.ImageLastAccessInit(fp)
 		if err != nil {
 			return nil, err
 		}
diff --git a/lxd/daemon_images_test.go b/lxd/daemon_images_test.go
index 7833cdfa2..68683dde7 100644
--- a/lxd/daemon_images_test.go
+++ b/lxd/daemon_images_test.go
@@ -19,11 +19,11 @@ type daemonImagesTestSuite struct {
 // newer image even if available, and just use the cached one.
 func (suite *daemonImagesTestSuite) TestUseCachedImagesIfAvailable() {
 	// Create an image with alias "test" and fingerprint "abcd".
-	err := suite.d.db.ImageInsert("abcd", "foo.xz", 1, false, true, "amd64", time.Now(), time.Now(), map[string]string{})
+	err := suite.d.cluster.ImageInsert("abcd", "foo.xz", 1, false, true, "amd64", time.Now(), time.Now(), map[string]string{})
 	suite.Req.Nil(err)
-	id, _, err := suite.d.db.ImageGet("abcd", false, true)
+	id, _, err := suite.d.cluster.ImageGet("abcd", false, true)
 	suite.Req.Nil(err)
-	err = suite.d.db.ImageSourceInsert(id, "img.srv", "simplestreams", "", "test")
+	err = suite.d.cluster.ImageSourceInsert(id, "img.srv", "simplestreams", "", "test")
 	suite.Req.Nil(err)
 
 	// Pretend we have already a non-expired entry for the remote
diff --git a/lxd/db/cluster/open.go b/lxd/db/cluster/open.go
index bcbb3a727..fbc678178 100644
--- a/lxd/db/cluster/open.go
+++ b/lxd/db/cluster/open.go
@@ -96,7 +96,8 @@ func EnsureSchema(db *sql.DB, address string) (bool, error) {
 	}
 
 	// When creating a database from scratch, insert an entry for node
-	// 1. This is needed for referential integrity with other tables.
+	// 1. This is needed for referential integrity with other tables. Also,
+	// create a default profile.
 	if initial == 0 {
 		stmt := `
 INSERT INTO nodes(id, name, address, schema, api_extensions) VALUES(1, 'none', '0.0.0.0', ?, ?)
@@ -106,6 +107,13 @@ INSERT INTO nodes(id, name, address, schema, api_extensions) VALUES(1, 'none', '
 			return false, err
 		}
 
+		stmt = `
+INSERT INTO profiles (name, description) VALUES ('default', 'Default LXD profile')
+`
+		_, err = db.Exec(stmt)
+		if err != nil {
+			return false, err
+		}
 	}
 
 	return true, err
diff --git a/lxd/db/cluster/update_test.go b/lxd/db/cluster/update_test.go
index f30a0d456..463d5d8f0 100644
--- a/lxd/db/cluster/update_test.go
+++ b/lxd/db/cluster/update_test.go
@@ -78,6 +78,18 @@ INSERT INTO containers VALUES (1, 1, 'bionic', 1, 1, 0, ?, 0, ?, 'Bionic Beaver'
 INSERT INTO containers VALUES (2, 2, 'bionic', 2, 2, 1, ?, 1, ?, 'Ubuntu LTS')
 `, time.Now(), time.Now())
 	require.Error(t, err)
+
+	// Cascading delete
+	_, err = db.Exec("INSERT INTO containers_config VALUES (1, 1, 'thekey', 'thevalue')")
+	require.NoError(t, err)
+	_, err = db.Exec("DELETE FROM containers")
+	require.NoError(t, err)
+	result, err := db.Exec("DELETE FROM containers_config")
+	require.NoError(t, err)
+	n, err := result.RowsAffected()
+	require.NoError(t, err)
+	assert.Equal(t, int64(0), n) // The row was already deleted by the previous query
+
 }
 
 func TestUpdateFromV1_Network(t *testing.T) {
diff --git a/lxd/db/containers.go b/lxd/db/containers.go
index e51df08d3..337a432a1 100644
--- a/lxd/db/containers.go
+++ b/lxd/db/containers.go
@@ -40,13 +40,13 @@ const (
 	CTypeSnapshot ContainerType = 1
 )
 
-func (n *Node) ContainerRemove(name string) error {
-	id, err := n.ContainerId(name)
+func (c *Cluster) ContainerRemove(name string) error {
+	id, err := c.ContainerId(name)
 	if err != nil {
 		return err
 	}
 
-	_, err = exec(n.db, "DELETE FROM containers WHERE id=?", id)
+	_, err = exec(c.db, "DELETE FROM containers WHERE id=?", id)
 	if err != nil {
 		return err
 	}
@@ -54,25 +54,25 @@ func (n *Node) ContainerRemove(name string) error {
 	return nil
 }
 
-func (n *Node) ContainerName(id int) (string, error) {
+func (c *Cluster) ContainerName(id int) (string, error) {
 	q := "SELECT name FROM containers WHERE id=?"
 	name := ""
 	arg1 := []interface{}{id}
 	arg2 := []interface{}{&name}
-	err := dbQueryRowScan(n.db, q, arg1, arg2)
+	err := dbQueryRowScan(c.db, q, arg1, arg2)
 	return name, err
 }
 
-func (n *Node) ContainerId(name string) (int, error) {
+func (c *Cluster) ContainerId(name string) (int, error) {
 	q := "SELECT id FROM containers WHERE name=?"
 	id := -1
 	arg1 := []interface{}{name}
 	arg2 := []interface{}{&id}
-	err := dbQueryRowScan(n.db, q, arg1, arg2)
+	err := dbQueryRowScan(c.db, q, arg1, arg2)
 	return id, err
 }
 
-func (n *Node) ContainerGet(name string) (ContainerArgs, error) {
+func (c *Cluster) ContainerGet(name string) (ContainerArgs, error) {
 	var used *time.Time // Hold the db-returned time
 	description := sql.NullString{}
 
@@ -84,7 +84,7 @@ func (n *Node) ContainerGet(name string) (ContainerArgs, error) {
 	q := "SELECT id, description, architecture, type, ephemeral, stateful, creation_date, last_use_date FROM containers WHERE name=?"
 	arg1 := []interface{}{name}
 	arg2 := []interface{}{&args.Id, &description, &args.Architecture, &args.Ctype, &ephemInt, &statefulInt, &args.CreationDate, &used}
-	err := dbQueryRowScan(n.db, q, arg1, arg2)
+	err := dbQueryRowScan(c.db, q, arg1, arg2)
 	if err != nil {
 		return args, err
 	}
@@ -109,13 +109,13 @@ func (n *Node) ContainerGet(name string) (ContainerArgs, error) {
 		args.LastUsedDate = time.Unix(0, 0).UTC()
 	}
 
-	config, err := n.ContainerConfig(args.Id)
+	config, err := c.ContainerConfig(args.Id)
 	if err != nil {
 		return args, err
 	}
 	args.Config = config
 
-	profiles, err := n.ContainerProfiles(args.Id)
+	profiles, err := c.ContainerProfiles(args.Id)
 	if err != nil {
 		return args, err
 	}
@@ -123,7 +123,7 @@ func (n *Node) ContainerGet(name string) (ContainerArgs, error) {
 
 	/* get container_devices */
 	args.Devices = types.Devices{}
-	newdevs, err := n.Devices(name, false)
+	newdevs, err := c.Devices(name, false)
 	if err != nil {
 		return args, err
 	}
@@ -135,13 +135,13 @@ func (n *Node) ContainerGet(name string) (ContainerArgs, error) {
 	return args, nil
 }
 
-func (n *Node) ContainerCreate(args ContainerArgs) (int, error) {
-	_, err := n.ContainerId(args.Name)
+func (c *Cluster) ContainerCreate(args ContainerArgs) (int, error) {
+	_, err := c.ContainerId(args.Name)
 	if err == nil {
 		return 0, DbErrAlreadyDefined
 	}
 
-	tx, err := begin(n.db)
+	tx, err := begin(c.db)
 	if err != nil {
 		return 0, err
 	}
@@ -159,14 +159,14 @@ func (n *Node) ContainerCreate(args ContainerArgs) (int, error) {
 	args.CreationDate = time.Now().UTC()
 	args.LastUsedDate = time.Unix(0, 0).UTC()
 
-	str := fmt.Sprintf("INSERT INTO containers (name, architecture, type, ephemeral, creation_date, last_use_date, stateful) VALUES (?, ?, ?, ?, ?, ?, ?)")
+	str := fmt.Sprintf("INSERT INTO containers (node_id, name, architecture, type, ephemeral, creation_date, last_use_date, stateful) VALUES (?, ?, ?, ?, ?, ?, ?, ?)")
 	stmt, err := tx.Prepare(str)
 	if err != nil {
 		tx.Rollback()
 		return 0, err
 	}
 	defer stmt.Close()
-	result, err := stmt.Exec(args.Name, args.Architecture, args.Ctype, ephemInt, args.CreationDate.Unix(), args.LastUsedDate.Unix(), statefulInt)
+	result, err := stmt.Exec(c.id, args.Name, args.Architecture, args.Ctype, ephemInt, args.CreationDate.Unix(), args.LastUsedDate.Unix(), statefulInt)
 	if err != nil {
 		tx.Rollback()
 		return 0, err
@@ -238,27 +238,27 @@ func ContainerConfigInsert(tx *sql.Tx, id int, config map[string]string) error {
 	return nil
 }
 
-func (n *Node) ContainerConfigGet(id int, key string) (string, error) {
+func (c *Cluster) ContainerConfigGet(id int, key string) (string, error) {
 	q := "SELECT value FROM containers_config WHERE container_id=? AND key=?"
 	value := ""
 	arg1 := []interface{}{id, key}
 	arg2 := []interface{}{&value}
-	err := dbQueryRowScan(n.db, q, arg1, arg2)
+	err := dbQueryRowScan(c.db, q, arg1, arg2)
 	return value, err
 }
 
-func (n *Node) ContainerConfigRemove(id int, name string) error {
-	_, err := exec(n.db, "DELETE FROM containers_config WHERE key=? AND container_id=?", name, id)
+func (c *Cluster) ContainerConfigRemove(id int, name string) error {
+	_, err := exec(c.db, "DELETE FROM containers_config WHERE key=? AND container_id=?", name, id)
 	return err
 }
 
-func (n *Node) ContainerSetStateful(id int, stateful bool) error {
+func (c *Cluster) ContainerSetStateful(id int, stateful bool) error {
 	statefulInt := 0
 	if stateful {
 		statefulInt = 1
 	}
 
-	_, err := exec(n.db, "UPDATE containers SET stateful=? WHERE id=?", statefulInt, id)
+	_, err := exec(c.db, "UPDATE containers SET stateful=? WHERE id=?", statefulInt, id)
 	return err
 }
 
@@ -285,7 +285,7 @@ func ContainerProfilesInsert(tx *sql.Tx, id int, profiles []string) error {
 }
 
 // Get a list of profiles for a given container id.
-func (n *Node) ContainerProfiles(containerId int) ([]string, error) {
+func (c *Cluster) ContainerProfiles(containerId int) ([]string, error) {
 	var name string
 	var profiles []string
 
@@ -297,7 +297,7 @@ func (n *Node) ContainerProfiles(containerId int) ([]string, error) {
 	inargs := []interface{}{containerId}
 	outfmt := []interface{}{name}
 
-	results, err := queryScan(n.db, query, inargs, outfmt)
+	results, err := queryScan(c.db, query, inargs, outfmt)
 	if err != nil {
 		return nil, err
 	}
@@ -312,7 +312,7 @@ func (n *Node) ContainerProfiles(containerId int) ([]string, error) {
 }
 
 // ContainerConfig gets the container configuration map from the DB
-func (n *Node) ContainerConfig(containerId int) (map[string]string, error) {
+func (c *Cluster) ContainerConfig(containerId int) (map[string]string, error) {
 	var key, value string
 	q := `SELECT key, value FROM containers_config WHERE container_id=?`
 
@@ -320,7 +320,7 @@ func (n *Node) ContainerConfig(containerId int) (map[string]string, error) {
 	outfmt := []interface{}{key, value}
 
 	// Results is already a slice here, not db Rows anymore.
-	results, err := queryScan(n.db, q, inargs, outfmt)
+	results, err := queryScan(c.db, q, inargs, outfmt)
 	if err != nil {
 		return nil, err //SmartError will wrap this and make "not found" errors pretty
 	}
@@ -337,12 +337,12 @@ func (n *Node) ContainerConfig(containerId int) (map[string]string, error) {
 	return config, nil
 }
 
-func (n *Node) ContainersList(cType ContainerType) ([]string, error) {
+func (c *Cluster) ContainersList(cType ContainerType) ([]string, error) {
 	q := fmt.Sprintf("SELECT name FROM containers WHERE type=? ORDER BY name")
 	inargs := []interface{}{cType}
 	var container string
 	outfmt := []interface{}{container}
-	result, err := queryScan(n.db, q, inargs, outfmt)
+	result, err := queryScan(c.db, q, inargs, outfmt)
 	if err != nil {
 		return nil, err
 	}
@@ -355,14 +355,14 @@ func (n *Node) ContainersList(cType ContainerType) ([]string, error) {
 	return ret, nil
 }
 
-func (n *Node) ContainersResetState() error {
+func (c *Cluster) ContainersResetState() error {
 	// Reset all container states
-	_, err := exec(n.db, "DELETE FROM containers_config WHERE key='volatile.last_state.power'")
+	_, err := exec(c.db, "DELETE FROM containers_config WHERE key='volatile.last_state.power'")
 	return err
 }
 
-func (n *Node) ContainerSetState(id int, state string) error {
-	tx, err := begin(n.db)
+func (c *Cluster) ContainerSetState(id int, state string) error {
+	tx, err := begin(c.db)
 	if err != nil {
 		return err
 	}
@@ -398,8 +398,8 @@ func (n *Node) ContainerSetState(id int, state string) error {
 	return TxCommit(tx)
 }
 
-func (n *Node) ContainerRename(oldName string, newName string) error {
-	tx, err := begin(n.db)
+func (c *Cluster) ContainerRename(oldName string, newName string) error {
+	tx, err := begin(c.db)
 	if err != nil {
 		return err
 	}
@@ -446,13 +446,13 @@ func ContainerUpdate(tx *sql.Tx, id int, description string, architecture int, e
 	return nil
 }
 
-func (n *Node) ContainerLastUsedUpdate(id int, date time.Time) error {
+func (c *Cluster) ContainerLastUsedUpdate(id int, date time.Time) error {
 	stmt := `UPDATE containers SET last_use_date=? WHERE id=?`
-	_, err := exec(n.db, stmt, date, id)
+	_, err := exec(c.db, stmt, date, id)
 	return err
 }
 
-func (n *Node) ContainerGetSnapshots(name string) ([]string, error) {
+func (c *Cluster) ContainerGetSnapshots(name string) ([]string, error) {
 	result := []string{}
 
 	regexp := name + shared.SnapshotDelimiter
@@ -460,7 +460,7 @@ func (n *Node) ContainerGetSnapshots(name string) ([]string, error) {
 	q := "SELECT name FROM containers WHERE type=? AND SUBSTR(name,1,?)=?"
 	inargs := []interface{}{CTypeSnapshot, length, regexp}
 	outfmt := []interface{}{name}
-	dbResults, err := queryScan(n.db, q, inargs, outfmt)
+	dbResults, err := queryScan(c.db, q, inargs, outfmt)
 	if err != nil {
 		return result, err
 	}
@@ -476,14 +476,14 @@ func (n *Node) ContainerGetSnapshots(name string) ([]string, error) {
  * Note, the code below doesn't deal with snapshots of snapshots.
  * To do that, we'll need to weed out based on # slashes in names
  */
-func (n *Node) ContainerNextSnapshot(name string) int {
+func (c *Cluster) ContainerNextSnapshot(name string) int {
 	base := name + shared.SnapshotDelimiter + "snap"
 	length := len(base)
 	q := fmt.Sprintf("SELECT name FROM containers WHERE type=? AND SUBSTR(name,1,?)=?")
 	var numstr string
 	inargs := []interface{}{CTypeSnapshot, length, base}
 	outfmt := []interface{}{numstr}
-	results, err := queryScan(n.db, q, inargs, outfmt)
+	results, err := queryScan(c.db, q, inargs, outfmt)
 	if err != nil {
 		return 0
 	}
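[Editor's note] ContainerGetSnapshots and ContainerNextSnapshot above rely on snapshots living in the same containers table, named <parent><delimiter><snapshot> and selected by prefix via SUBSTR(name,1,?)=?. The same filter in plain Go, assuming "/" for shared.SnapshotDelimiter (its value isn't shown in this patch); note the prefix includes the delimiter, so "c10" doesn't match when looking up "c1":

package main

import (
	"fmt"
	"strings"
)

// snapshotsOf reproduces the SUBSTR prefix match from ContainerGetSnapshots.
func snapshotsOf(container string, names []string) []string {
	prefix := container + "/"
	var out []string
	for _, n := range names {
		if strings.HasPrefix(n, prefix) {
			out = append(out, n)
		}
	}
	return out
}

func main() {
	names := []string{"c1", "c1/snap0", "c1/snap1", "c10/snap0"}
	fmt.Println(snapshotsOf("c1", names)) // [c1/snap0 c1/snap1]
}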
diff --git a/lxd/db/db.go b/lxd/db/db.go
index 76e20ed10..0dd5e6c7e 100644
--- a/lxd/db/db.go
+++ b/lxd/db/db.go
@@ -84,10 +84,6 @@ func OpenNode(dir string, fresh func(*Node) error, legacyPatches map[int]*Legacy
 	}
 
 	if initial == 0 {
-		err := node.ProfileCreateDefault()
-		if err != nil {
-			return nil, nil, err
-		}
 		if fresh != nil {
 			err := fresh(node)
 			if err != nil {
@@ -201,6 +197,13 @@ func OpenCluster(name string, dialer grpcsql.Dialer, address string) (*Cluster,
 	return cluster, nil
 }
 
+// ForLocalInspection is an aid for the hack in initializeDbObject, which
+// sets the db-related Daemon attributes upfront, to be backward compatible
+// with the legacy patches that need to interact with the database.
+func ForLocalInspection(db *sql.DB) *Cluster {
+	return &Cluster{db: db}
+}
+
 // Transaction creates a new ClusterTx object and transactionally executes the
 // cluster database interactions invoked by the given function. If the function
 // returns no error, all database changes are committed to the cluster database
@@ -252,6 +255,13 @@ func (c *Cluster) DB() *sql.DB {
 	return c.db
 }
 
+// Begin a new transaction against the cluster database.
+//
+// FIXME: legacy method.
+func (c *Cluster) Begin() (*sql.Tx, error) {
+	return begin(c.db)
+}
+
 // UpdateSchemasDotGo updates the schema.go files in the local/ and cluster/
 // sub-packages.
 func UpdateSchemasDotGo() error {
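[Editor's note] Cluster.Begin is flagged as a legacy escape hatch: callers like containerLXC.Update above still open a raw transaction and commit it themselves, instead of going through the Transaction wrapper whose contract is documented earlier in this file (commit on nil, discard on error). A sketch of that wrapper contract using plain database/sql; the transaction helper here is illustrative, not LXD's implementation:

package main

import (
	"database/sql"
	"fmt"

	_ "github.com/mattn/go-sqlite3"
)

// transaction mimics the commit-on-nil / rollback-on-error contract that
// Cluster.Transaction documents above.
func transaction(db *sql.DB, f func(*sql.Tx) error) error {
	tx, err := db.Begin()
	if err != nil {
		return err
	}
	if err := f(tx); err != nil {
		tx.Rollback()
		return err
	}
	return tx.Commit()
}

func main() {
	db, _ := sql.Open("sqlite3", ":memory:")
	defer db.Close()
	db.SetMaxOpenConns(1) // keep the single :memory: database

	db.Exec("CREATE TABLE kv (k TEXT PRIMARY KEY, v TEXT)")
	err := transaction(db, func(tx *sql.Tx) error {
		_, err := tx.Exec("INSERT INTO kv VALUES ('state', 'RUNNING')")
		return err
	})
	fmt.Println("committed:", err == nil)
}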
diff --git a/lxd/db/db_internal_test.go b/lxd/db/db_internal_test.go
index 9daf779a9..4ea4bd355 100644
--- a/lxd/db/db_internal_test.go
+++ b/lxd/db/db_internal_test.go
@@ -3,16 +3,11 @@ package db
 import (
 	"database/sql"
 	"fmt"
-	"io/ioutil"
-	"os"
-	"path/filepath"
 	"testing"
 	"time"
 
 	"github.com/stretchr/testify/suite"
 
-	"github.com/lxc/lxd/lxd/db/node"
-	"github.com/lxc/lxd/lxd/db/query"
 	"github.com/lxc/lxd/lxd/types"
 	"github.com/lxc/lxd/shared/api"
 	"github.com/lxc/lxd/shared/logger"
@@ -20,7 +15,7 @@ import (
 )
 
 const DB_FIXTURES string = `
-    INSERT INTO containers (name, architecture, type) VALUES ('thename', 1, 1);
+    INSERT INTO containers (node_id, name, architecture, type) VALUES (1, 'thename', 1, 1);
     INSERT INTO profiles (name) VALUES ('theprofile');
     INSERT INTO containers_profiles (container_id, profile_id) VALUES (1, 2);
     INSERT INTO containers_config (container_id, key, value) VALUES (1, 'thekey', 'thevalue');
@@ -37,23 +32,23 @@ const DB_FIXTURES string = `
 type dbTestSuite struct {
 	suite.Suite
 
-	dir string
-	db  *Node
+	dir     string
+	db      *Cluster
+	cleanup func()
 }
 
 func (s *dbTestSuite) SetupTest() {
-	s.db = s.CreateTestDb()
+	s.db, s.cleanup = s.CreateTestDb()
 	_, err := s.db.DB().Exec(DB_FIXTURES)
 	s.Nil(err)
 }
 
 func (s *dbTestSuite) TearDownTest() {
-	s.db.DB().Close()
-	os.RemoveAll(s.dir)
+	s.cleanup()
 }
 
 // Initialize a test in-memory DB.
-func (s *dbTestSuite) CreateTestDb() *Node {
+func (s *dbTestSuite) CreateTestDb() (*Cluster, func()) {
 	var err error
 
 	// Setup logging if main() hasn't been called/when testing
@@ -62,12 +57,8 @@ func (s *dbTestSuite) CreateTestDb() *Node {
 		s.Nil(err)
 	}
 
-	s.dir, err = ioutil.TempDir("", "lxd-db-test")
-	s.Nil(err)
-
-	db, _, err := OpenNode(s.dir, nil, nil)
-	s.Nil(err)
-	return db
+	db, cleanup := NewTestCluster(s.T())
+	return db, cleanup
 }
 
 func TestDBTestSuite(t *testing.T) {
@@ -169,155 +160,6 @@ func (s *dbTestSuite) Test_deleting_an_image_cascades_on_related_tables() {
 	s.Equal(count, 0, "Deleting an image didn't delete the related images_properties!")
 }
 
-func (s *dbTestSuite) Test_running_UpdateFromV6_adds_on_delete_cascade() {
-	// Upgrading the database schema with updateFromV6 adds ON DELETE CASCADE
-	// to sqlite tables that require it, and conserve the data.
-
-	var err error
-	var count int
-
-	db := s.CreateTestDb()
-	defer db.DB().Close()
-
-	statements := `
-CREATE TABLE IF NOT EXISTS containers (
-    id INTEGER primary key AUTOINCREMENT NOT NULL,
-    name VARCHAR(255) NOT NULL,
-    architecture INTEGER NOT NULL,
-    type INTEGER NOT NULL,
-    power_state INTEGER NOT NULL DEFAULT 0,
-    ephemeral INTEGER NOT NULL DEFAULT 0,
-    UNIQUE (name)
-);
-CREATE TABLE IF NOT EXISTS containers_config (
-    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    container_id INTEGER NOT NULL,
-    key VARCHAR(255) NOT NULL,
-    value TEXT,
-    FOREIGN KEY (container_id) REFERENCES containers (id),
-    UNIQUE (container_id, key)
-);
-
-INSERT INTO containers (name, architecture, type) VALUES ('thename', 1, 1);
-INSERT INTO containers_config (container_id, key, value) VALUES (1, 'thekey', 'thevalue');`
-
-	_, err = db.DB().Exec(statements)
-	s.Nil(err)
-
-	// Run the upgrade from V6 code
-	err = query.Transaction(db.DB(), node.UpdateFromV16)
-	s.Nil(err)
-
-	// Make sure the inserted data is still there.
-	statements = `SELECT count(*) FROM containers_config;`
-	err = db.DB().QueryRow(statements).Scan(&count)
-	s.Nil(err)
-	s.Equal(count, 1, "There should be exactly one entry in containers_config!")
-
-	// Drop the container.
-	statements = `DELETE FROM containers WHERE name = 'thename';`
-
-	_, err = db.DB().Exec(statements)
-	s.Nil(err)
-
-	// Make sure there are 0 container_profiles entries left.
-	statements = `SELECT count(*) FROM containers_profiles;`
-	err = db.DB().QueryRow(statements).Scan(&count)
-	s.Nil(err)
-	s.Equal(count, 0, "Deleting a container didn't delete the profile association!")
-}
-
-func (s *dbTestSuite) Test_run_database_upgrades_with_some_foreign_keys_inconsistencies() {
-	var db *sql.DB
-	var err error
-	var count int
-	var statements string
-
-	dir, err := ioutil.TempDir("", "lxd-db-test-")
-	s.Nil(err)
-	defer os.RemoveAll(dir)
-	path := filepath.Join(dir, "lxd.db")
-	db, err = sql.Open("sqlite3", path)
-	defer db.Close()
-	s.Nil(err)
-
-	// This schema is a part of schema rev 1.
-	statements = `
-CREATE TABLE containers (
-    id INTEGER primary key AUTOINCREMENT NOT NULL,
-    name VARCHAR(255) NOT NULL,
-    architecture INTEGER NOT NULL,
-    type INTEGER NOT NULL,
-    UNIQUE (name)
-);
-CREATE TABLE containers_config (
-    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    container_id INTEGER NOT NULL,
-    key VARCHAR(255) NOT NULL,
-    value TEXT,
-    FOREIGN KEY (container_id) REFERENCES containers (id),
-    UNIQUE (container_id, key)
-);
-CREATE TABLE schema (
-    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    version INTEGER NOT NULL,
-    updated_at DATETIME NOT NULL,
-    UNIQUE (version)
-);
-CREATE TABLE images (
-    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    fingerprint VARCHAR(255) NOT NULL,
-    filename VARCHAR(255) NOT NULL,
-    size INTEGER NOT NULL,
-    public INTEGER NOT NULL DEFAULT 0,
-    architecture INTEGER NOT NULL,
-    creation_date DATETIME,
-    expiry_date DATETIME,
-    upload_date DATETIME NOT NULL,
-    UNIQUE (fingerprint)
-);
-CREATE TABLE images_properties (
-    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    image_id INTEGER NOT NULL,
-    type INTEGER NOT NULL,
-    key VARCHAR(255) NOT NULL,
-    value TEXT,
-    FOREIGN KEY (image_id) REFERENCES images (id)
-);
-CREATE TABLE certificates (
-    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    fingerprint VARCHAR(255) NOT NULL,
-    type INTEGER NOT NULL,
-    name VARCHAR(255) NOT NULL,
-    certificate TEXT NOT NULL,
-    UNIQUE (fingerprint)
-);
-INSERT INTO schema (version, updated_at) values (1, "now");
-INSERT INTO containers (name, architecture, type) VALUES ('thename', 1, 1);
-INSERT INTO containers_config (container_id, key, value) VALUES (1, 'thekey', 'thevalue');`
-
-	_, err = db.Exec(statements)
-	s.Nil(err)
-
-	// Now that we have a consistent schema, let's remove the container entry
-	// *without* the ON DELETE CASCADE in place.
-	statements = `DELETE FROM containers;`
-	_, err = db.Exec(statements)
-	s.Nil(err)
-
-	// The "foreign key" on containers_config now points to nothing.
-	// Let's run the schema upgrades.
-	schema := node.Schema()
-	_, err = schema.Ensure(db)
-	s.Nil(err)
-
-	// Make sure there are 0 containers_config entries left.
-	statements = `SELECT count(*) FROM containers_config;`
-	err = db.QueryRow(statements).Scan(&count)
-	s.Nil(err)
-	s.Equal(count, 0, "updateDb did not delete orphaned child entries after adding ON DELETE CASCADE!")
-}
-
 func (s *dbTestSuite) Test_ImageGet_finds_image_for_fingerprint() {
 	var err error
 	var result *api.Image
diff --git a/lxd/db/devices.go b/lxd/db/devices.go
index 2e321ab3e..94c0b4f69 100644
--- a/lxd/db/devices.go
+++ b/lxd/db/devices.go
@@ -138,7 +138,7 @@ func dbDeviceConfig(db *sql.DB, id int, isprofile bool) (types.Device, error) {
 	return newdev, nil
 }
 
-func (n *Node) Devices(qName string, isprofile bool) (types.Devices, error) {
+func (c *Cluster) Devices(qName string, isprofile bool) (types.Devices, error) {
 	var q string
 	if isprofile {
 		q = `SELECT profiles_devices.id, profiles_devices.name, profiles_devices.type
@@ -155,7 +155,7 @@ func (n *Node) Devices(qName string, isprofile bool) (types.Devices, error) {
 	var name, stype string
 	inargs := []interface{}{qName}
 	outfmt := []interface{}{id, name, dtype}
-	results, err := queryScan(n.db, q, inargs, outfmt)
+	results, err := queryScan(c.db, q, inargs, outfmt)
 	if err != nil {
 		return nil, err
 	}
@@ -168,7 +168,7 @@ func (n *Node) Devices(qName string, isprofile bool) (types.Devices, error) {
 		if err != nil {
 			return nil, err
 		}
-		newdev, err := dbDeviceConfig(n.db, id, isprofile)
+		newdev, err := dbDeviceConfig(c.db, id, isprofile)
 		if err != nil {
 			return nil, err
 		}
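
Most of the remaining lxd/db hunks are this same mechanical move: entity accessors switch their receiver from the node-local handle to the new cluster handle. Roughly, the split looks like this (a sketch only; the field layout is an assumption, the real structs carry more state):

package db

import "database/sql"

// Node wraps the node-local sqlite database, which after this series
// only keeps machine-specific state (config, patches, raft_nodes, ...).
type Node struct {
	db *sql.DB
}

// Cluster wraps the dqlite-replicated database holding everything
// cluster-wide: containers, images, profiles and friends.
type Cluster struct {
	db *sql.DB
	id int64 // this node's ID, used below by ImageInsert for images_nodes
}
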
diff --git a/lxd/db/images.go b/lxd/db/images.go
index 244d5777d..e69e8acb7 100644
--- a/lxd/db/images.go
+++ b/lxd/db/images.go
@@ -17,7 +17,7 @@ var ImageSourceProtocol = map[int]string{
 	2: "simplestreams",
 }
 
-func (n *Node) ImagesGet(public bool) ([]string, error) {
+func (c *Cluster) ImagesGet(public bool) ([]string, error) {
 	q := "SELECT fingerprint FROM images"
 	if public == true {
 		q = "SELECT fingerprint FROM images WHERE public=1"
@@ -26,7 +26,7 @@ func (n *Node) ImagesGet(public bool) ([]string, error) {
 	var fp string
 	inargs := []interface{}{}
 	outfmt := []interface{}{fp}
-	dbResults, err := queryScan(n.db, q, inargs, outfmt)
+	dbResults, err := queryScan(c.db, q, inargs, outfmt)
 	if err != nil {
 		return []string{}, err
 	}
@@ -39,7 +39,7 @@ func (n *Node) ImagesGet(public bool) ([]string, error) {
 	return results, nil
 }
 
-func (n *Node) ImagesGetExpired(expiry int64) ([]string, error) {
+func (c *Cluster) ImagesGetExpired(expiry int64) ([]string, error) {
 	q := `SELECT fingerprint, last_use_date, upload_date FROM images WHERE cached=1`
 
 	var fpStr string
@@ -48,7 +48,7 @@ func (n *Node) ImagesGetExpired(expiry int64) ([]string, error) {
 
 	inargs := []interface{}{}
 	outfmt := []interface{}{fpStr, useStr, uploadStr}
-	dbResults, err := queryScan(n.db, q, inargs, outfmt)
+	dbResults, err := queryScan(c.db, q, inargs, outfmt)
 	if err != nil {
 		return []string{}, err
 	}
@@ -79,7 +79,7 @@ func (n *Node) ImagesGetExpired(expiry int64) ([]string, error) {
 	return results, nil
 }
 
-func (n *Node) ImageSourceInsert(imageId int, server string, protocol string, certificate string, alias string) error {
+func (c *Cluster) ImageSourceInsert(imageId int, server string, protocol string, certificate string, alias string) error {
 	stmt := `INSERT INTO images_source (image_id, server, protocol, certificate, alias) values (?, ?, ?, ?, ?)`
 
 	protocolInt := -1
@@ -93,11 +93,11 @@ func (n *Node) ImageSourceInsert(imageId int, server string, protocol string, ce
 		return fmt.Errorf("Invalid protocol: %s", protocol)
 	}
 
-	_, err := exec(n.db, stmt, imageId, server, protocolInt, certificate, alias)
+	_, err := exec(c.db, stmt, imageId, server, protocolInt, certificate, alias)
 	return err
 }
 
-func (n *Node) ImageSourceGet(imageId int) (int, api.ImageSource, error) {
+func (c *Cluster) ImageSourceGet(imageId int) (int, api.ImageSource, error) {
 	q := `SELECT id, server, protocol, certificate, alias FROM images_source WHERE image_id=?`
 
 	id := 0
@@ -106,7 +106,7 @@ func (n *Node) ImageSourceGet(imageId int) (int, api.ImageSource, error) {
 
 	arg1 := []interface{}{imageId}
 	arg2 := []interface{}{&id, &result.Server, &protocolInt, &result.Certificate, &result.Alias}
-	err := dbQueryRowScan(n.db, q, arg1, arg2)
+	err := dbQueryRowScan(c.db, q, arg1, arg2)
 	if err != nil {
 		if err == sql.ErrNoRows {
 			return -1, api.ImageSource{}, NoSuchObjectError
@@ -129,7 +129,7 @@ func (n *Node) ImageSourceGet(imageId int) (int, api.ImageSource, error) {
 // Try to find a source entry of a locally cached image that matches
 // the given remote details (server, protocol and alias). Return the
 // fingerprint linked to the matching entry, if any.
-func (n *Node) ImageSourceGetCachedFingerprint(server string, protocol string, alias string) (string, error) {
+func (c *Cluster) ImageSourceGetCachedFingerprint(server string, protocol string, alias string) (string, error) {
 	protocolInt := -1
 	for protoInt, protoString := range ImageSourceProtocol {
 		if protoString == protocol {
@@ -152,7 +152,7 @@ func (n *Node) ImageSourceGetCachedFingerprint(server string, protocol string, a
 
 	arg1 := []interface{}{server, protocolInt, alias}
 	arg2 := []interface{}{&fingerprint}
-	err := dbQueryRowScan(n.db, q, arg1, arg2)
+	err := dbQueryRowScan(c.db, q, arg1, arg2)
 	if err != nil {
 		if err == sql.ErrNoRows {
 			return "", NoSuchObjectError
@@ -165,13 +165,13 @@ func (n *Node) ImageSourceGetCachedFingerprint(server string, protocol string, a
 }
 
 // Whether an image with the given fingerprint exists.
-func (n *Node) ImageExists(fingerprint string) (bool, error) {
+func (c *Cluster) ImageExists(fingerprint string) (bool, error) {
 	var exists bool
 	var err error
 	query := "SELECT COUNT(*) > 0 FROM images WHERE fingerprint=?"
 	inargs := []interface{}{fingerprint}
 	outargs := []interface{}{&exists}
-	err = dbQueryRowScan(n.db, query, inargs, outargs)
+	err = dbQueryRowScan(c.db, query, inargs, outargs)
 	return exists, err
 }
 
@@ -180,7 +180,7 @@ func (n *Node) ImageExists(fingerprint string) (bool, error) {
 // pass a shortform and will get the full fingerprint.
 // There can never be more than one image with a given fingerprint, as it is
 // enforced by a UNIQUE constraint in the schema.
-func (n *Node) ImageGet(fingerprint string, public bool, strictMatching bool) (int, *api.Image, error) {
+func (c *Cluster) ImageGet(fingerprint string, public bool, strictMatching bool) (int, *api.Image, error) {
 	var err error
 	var create, expire, used, upload *time.Time // These hold the db-returned times
 
@@ -212,7 +212,7 @@ func (n *Node) ImageGet(fingerprint string, public bool, strictMatching bool) (i
 		query += " AND public=1"
 	}
 
-	err = dbQueryRowScan(n.db, query, inargs, outfmt)
+	err = dbQueryRowScan(c.db, query, inargs, outfmt)
 	if err != nil {
 		return -1, nil, err // Likely: there are no rows for this fingerprint
 	}
@@ -223,7 +223,7 @@ func (n *Node) ImageGet(fingerprint string, public bool, strictMatching bool) (i
 		count := 0
 		outfmt := []interface{}{&count}
 
-		err = dbQueryRowScan(n.db, query, inargs, outfmt)
+		err = dbQueryRowScan(c.db, query, inargs, outfmt)
 		if err != nil {
 			return -1, nil, err
 		}
@@ -262,7 +262,7 @@ func (n *Node) ImageGet(fingerprint string, public bool, strictMatching bool) (i
 	var key, value, name, desc string
 	inargs = []interface{}{id}
 	outfmt = []interface{}{key, value}
-	results, err := queryScan(n.db, q, inargs, outfmt)
+	results, err := queryScan(c.db, q, inargs, outfmt)
 	if err != nil {
 		return -1, nil, err
 	}
@@ -280,7 +280,7 @@ func (n *Node) ImageGet(fingerprint string, public bool, strictMatching bool) (i
 	q = "SELECT name, description FROM images_aliases WHERE image_id=?"
 	inargs = []interface{}{id}
 	outfmt = []interface{}{name, desc}
-	results, err = queryScan(n.db, q, inargs, outfmt)
+	results, err = queryScan(c.db, q, inargs, outfmt)
 	if err != nil {
 		return -1, nil, err
 	}
@@ -295,7 +295,7 @@ func (n *Node) ImageGet(fingerprint string, public bool, strictMatching bool) (i
 
 	image.Aliases = aliases
 
-	_, source, err := n.ImageSourceGet(id)
+	_, source, err := c.ImageSourceGet(id)
 	if err == nil {
 		image.UpdateSource = &source
 	}
@@ -303,8 +303,8 @@ func (n *Node) ImageGet(fingerprint string, public bool, strictMatching bool) (i
 	return id, &image, nil
 }
 
-func (n *Node) ImageDelete(id int) error {
-	_, err := exec(n.db, "DELETE FROM images WHERE id=?", id)
+func (c *Cluster) ImageDelete(id int) error {
+	_, err := exec(c.db, "DELETE FROM images WHERE id=?", id)
 	if err != nil {
 		return err
 	}
@@ -312,12 +312,12 @@ func (n *Node) ImageDelete(id int) error {
 	return nil
 }
 
-func (n *Node) ImageAliasesGet() ([]string, error) {
+func (c *Cluster) ImageAliasesGet() ([]string, error) {
 	q := "SELECT name FROM images_aliases"
 	var name string
 	inargs := []interface{}{}
 	outfmt := []interface{}{name}
-	results, err := queryScan(n.db, q, inargs, outfmt)
+	results, err := queryScan(c.db, q, inargs, outfmt)
 	if err != nil {
 		return nil, err
 	}
@@ -328,7 +328,7 @@ func (n *Node) ImageAliasesGet() ([]string, error) {
 	return names, nil
 }
 
-func (n *Node) ImageAliasGet(name string, isTrustedClient bool) (int, api.ImageAliasesEntry, error) {
+func (c *Cluster) ImageAliasGet(name string, isTrustedClient bool) (int, api.ImageAliasesEntry, error) {
 	q := `SELECT images_aliases.id, images.fingerprint, images_aliases.description
 			 FROM images_aliases
 			 INNER JOIN images
@@ -344,7 +344,7 @@ func (n *Node) ImageAliasGet(name string, isTrustedClient bool) (int, api.ImageA
 
 	arg1 := []interface{}{name}
 	arg2 := []interface{}{&id, &fingerprint, &description}
-	err := dbQueryRowScan(n.db, q, arg1, arg2)
+	err := dbQueryRowScan(c.db, q, arg1, arg2)
 	if err != nil {
 		if err == sql.ErrNoRows {
 			return -1, entry, NoSuchObjectError
@@ -360,53 +360,53 @@ func (n *Node) ImageAliasGet(name string, isTrustedClient bool) (int, api.ImageA
 	return id, entry, nil
 }
 
-func (n *Node) ImageAliasRename(id int, name string) error {
-	_, err := exec(n.db, "UPDATE images_aliases SET name=? WHERE id=?", name, id)
+func (c *Cluster) ImageAliasRename(id int, name string) error {
+	_, err := exec(c.db, "UPDATE images_aliases SET name=? WHERE id=?", name, id)
 	return err
 }
 
-func (n *Node) ImageAliasDelete(name string) error {
-	_, err := exec(n.db, "DELETE FROM images_aliases WHERE name=?", name)
+func (c *Cluster) ImageAliasDelete(name string) error {
+	_, err := exec(c.db, "DELETE FROM images_aliases WHERE name=?", name)
 	return err
 }
 
-func (n *Node) ImageAliasesMove(source int, destination int) error {
-	_, err := exec(n.db, "UPDATE images_aliases SET image_id=? WHERE image_id=?", destination, source)
+func (c *Cluster) ImageAliasesMove(source int, destination int) error {
+	_, err := exec(c.db, "UPDATE images_aliases SET image_id=? WHERE image_id=?", destination, source)
 	return err
 }
 
 // Insert an alias into the database.
-func (n *Node) ImageAliasAdd(name string, imageID int, desc string) error {
+func (c *Cluster) ImageAliasAdd(name string, imageID int, desc string) error {
 	stmt := `INSERT INTO images_aliases (name, image_id, description) values (?, ?, ?)`
-	_, err := exec(n.db, stmt, name, imageID, desc)
+	_, err := exec(c.db, stmt, name, imageID, desc)
 	return err
 }
 
-func (n *Node) ImageAliasUpdate(id int, imageID int, desc string) error {
+func (c *Cluster) ImageAliasUpdate(id int, imageID int, desc string) error {
 	stmt := `UPDATE images_aliases SET image_id=?, description=? WHERE id=?`
-	_, err := exec(n.db, stmt, imageID, desc, id)
+	_, err := exec(c.db, stmt, imageID, desc, id)
 	return err
 }
 
-func (n *Node) ImageLastAccessUpdate(fingerprint string, date time.Time) error {
+func (c *Cluster) ImageLastAccessUpdate(fingerprint string, date time.Time) error {
 	stmt := `UPDATE images SET last_use_date=? WHERE fingerprint=?`
-	_, err := exec(n.db, stmt, date, fingerprint)
+	_, err := exec(c.db, stmt, date, fingerprint)
 	return err
 }
 
-func (n *Node) ImageLastAccessInit(fingerprint string) error {
+func (c *Cluster) ImageLastAccessInit(fingerprint string) error {
 	stmt := `UPDATE images SET cached=1, last_use_date=strftime("%s") WHERE fingerprint=?`
-	_, err := exec(n.db, stmt, fingerprint)
+	_, err := exec(c.db, stmt, fingerprint)
 	return err
 }
 
-func (n *Node) ImageUpdate(id int, fname string, sz int64, public bool, autoUpdate bool, architecture string, createdAt time.Time, expiresAt time.Time, properties map[string]string) error {
+func (c *Cluster) ImageUpdate(id int, fname string, sz int64, public bool, autoUpdate bool, architecture string, createdAt time.Time, expiresAt time.Time, properties map[string]string) error {
 	arch, err := osarch.ArchitectureId(architecture)
 	if err != nil {
 		arch = 0
 	}
 
-	tx, err := begin(n.db)
+	tx, err := begin(c.db)
 	if err != nil {
 		return err
 	}
@@ -461,13 +461,13 @@ func (n *Node) ImageUpdate(id int, fname string, sz int64, public bool, autoUpda
 	return nil
 }
 
-func (n *Node) ImageInsert(fp string, fname string, sz int64, public bool, autoUpdate bool, architecture string, createdAt time.Time, expiresAt time.Time, properties map[string]string) error {
+func (c *Cluster) ImageInsert(fp string, fname string, sz int64, public bool, autoUpdate bool, architecture string, createdAt time.Time, expiresAt time.Time, properties map[string]string) error {
 	arch, err := osarch.ArchitectureId(architecture)
 	if err != nil {
 		arch = 0
 	}
 
-	tx, err := begin(n.db)
+	tx, err := begin(c.db)
 	if err != nil {
 		return err
 	}
@@ -495,14 +495,14 @@ func (n *Node) ImageInsert(fp string, fname string, sz int64, public bool, autoU
 		return err
 	}
 
-	if len(properties) > 0 {
-		id64, err := result.LastInsertId()
-		if err != nil {
-			tx.Rollback()
-			return err
-		}
-		id := int(id64)
+	id64, err := result.LastInsertId()
+	if err != nil {
+		tx.Rollback()
+		return err
+	}
+	id := int(id64)
 
+	if len(properties) > 0 {
 		pstmt, err := tx.Prepare(`INSERT INTO images_properties (image_id, type, key, value) VALUES (?, 0, ?, ?)`)
 		if err != nil {
 			tx.Rollback()
@@ -522,6 +522,12 @@ func (n *Node) ImageInsert(fp string, fname string, sz int64, public bool, autoU
 
 	}
 
+	_, err = tx.Exec("INSERT INTO images_nodes(image_id, node_id) VALUES(?, ?)", id, c.id)
+	if err != nil {
+		tx.Rollback()
+		return err
+	}
+
 	if err := TxCommit(tx); err != nil {
 		return err
 	}
@@ -550,7 +556,7 @@ func (c *Cluster) ImageGetPools(imageFingerprint string) ([]int64, error) {
 }
 
 // Get the names of all storage pools on which a given image exists.
-func (n *Node) ImageGetPoolNamesFromIDs(poolIDs []int64) ([]string, error) {
+func (c *Cluster) ImageGetPoolNamesFromIDs(poolIDs []int64) ([]string, error) {
 	var poolName string
 	query := "SELECT name FROM storage_pools WHERE id=?"
 
@@ -559,7 +565,7 @@ func (n *Node) ImageGetPoolNamesFromIDs(poolIDs []int64) ([]string, error) {
 		inargs := []interface{}{poolID}
 		outargs := []interface{}{poolName}
 
-		result, err := queryScan(n.db, query, inargs, outargs)
+		result, err := queryScan(c.db, query, inargs, outargs)
 		if err != nil {
 			return []string{}, err
 		}
@@ -573,7 +579,7 @@ func (n *Node) ImageGetPoolNamesFromIDs(poolIDs []int64) ([]string, error) {
 }
 
 // ImageUploadedAt updates the upload_date column of an image row.
-func (n *Node) ImageUploadedAt(id int, uploadedAt time.Time) error {
-	_, err := exec(n.db, "UPDATE images SET upload_date=? WHERE id=?", uploadedAt, id)
+func (c *Cluster) ImageUploadedAt(id int, uploadedAt time.Time) error {
+	_, err := exec(c.db, "UPDATE images SET upload_date=? WHERE id=?", uploadedAt, id)
 	return err
 }
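
The reshuffle at the end of ImageInsert is the one non-mechanical change in this file: LastInsertId moves out of the len(properties) guard because the new images_nodes insert needs the image ID even when no properties were supplied. A standalone sketch of the resulting transaction shape (schema trimmed to the two tables the example touches; the node ID is hard-coded where the patch uses c.id):

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Trimmed-down schema: just enough to show the insert ordering.
	_, err = db.Exec(`
CREATE TABLE images (id INTEGER PRIMARY KEY AUTOINCREMENT, fingerprint TEXT UNIQUE);
CREATE TABLE images_nodes (image_id INTEGER, node_id INTEGER);`)
	if err != nil {
		log.Fatal(err)
	}

	tx, err := db.Begin()
	if err != nil {
		log.Fatal(err)
	}

	result, err := tx.Exec("INSERT INTO images (fingerprint) VALUES (?)", "abcd")
	if err != nil {
		tx.Rollback()
		log.Fatal(err)
	}

	// The ID is needed unconditionally now: both the optional properties
	// insert and the new images_nodes association depend on it.
	id, err := result.LastInsertId()
	if err != nil {
		tx.Rollback()
		log.Fatal(err)
	}

	nodeID := int64(1) // stand-in for c.id, the local node's ID
	_, err = tx.Exec("INSERT INTO images_nodes (image_id, node_id) VALUES (?, ?)", id, nodeID)
	if err != nil {
		tx.Rollback()
		log.Fatal(err)
	}

	if err := tx.Commit(); err != nil {
		log.Fatal(err)
	}
	fmt.Println("image", id, "associated with node", nodeID)
}
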
diff --git a/lxd/db/migration.go b/lxd/db/migration.go
index 9744bb8a3..4a424bf7e 100644
--- a/lxd/db/migration.go
+++ b/lxd/db/migration.go
@@ -65,6 +65,13 @@ func (c *Cluster) ImportPreClusteringData(dump *Dump) error {
 		return errors.Wrap(err, "failed to start cluster database transaction")
 	}
 
+	// Delete the default profile in the cluster database, which always
+	// gets created no matter what.
+	_, err = tx.Exec("DELETE FROM profiles WHERE id=1")
+	if err != nil {
+		return errors.Wrap(err, "failed to delete default profile")
+	}
+
 	for _, table := range preClusteringTables {
 		for i, row := range dump.Data[table] {
 			for i, element := range row {
@@ -88,6 +95,8 @@ func (c *Cluster) ImportPreClusteringData(dump *Dump) error {
 			}
 
 			switch table {
+			case "containers":
+				fallthrough
 			case "networks_config":
 				appendNodeID()
 			case "storage_pools_config":
@@ -117,6 +126,28 @@ func (c *Cluster) ImportPreClusteringData(dump *Dump) error {
 			if n != 1 {
 				return fmt.Errorf("could not insert %d into %s", i, table)
 			}
+
+			// Also insert the image ID -> node ID association.
+			if table == "images" {
+				stmt := "INSERT INTO images_nodes(image_id, node_id) VALUES(?, 1)"
+				var imageID int64
+				for i, column := range columns {
+					if column == "id" {
+						var ok bool
+						if imageID, ok = row[i].(int64); !ok {
+							return fmt.Errorf("image ID is not an integer")
+						}
+						break
+					}
+				}
+				if imageID == 0 {
+					return fmt.Errorf("image has invalid ID")
+				}
+				_, err := tx.Exec(stmt, imageID)
+				if err != nil {
+					return errors.Wrapf(err, "failed to associate image to node")
+				}
+			}
 		}
 	}
 
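The switch on table names above decides which pre-clustering tables gain a node_id column as their rows are copied into the cluster database, pinning each row to the node that owned it. A rough sketch of that idea with a hypothetical helper (importRow, the nodeBound set and the row/column shapes are all illustrative, not the patch's exact types):

package db

import (
	"database/sql"
	"fmt"
	"strings"
)

// importRow copies one dumped row into the cluster database, appending
// a node_id value for tables that were node-scoped before clustering.
func importRow(tx *sql.Tx, table string, columns []string, row []interface{}, nodeID int64) error {
	// Subset of the tables the patch routes through appendNodeID
	// (the real logic has more cases and extra conditions).
	nodeBound := map[string]bool{
		"containers":      true,
		"networks_config": true,
	}
	if nodeBound[table] {
		columns = append(columns, "node_id")
		row = append(row, nodeID)
	}

	placeholders := strings.TrimSuffix(strings.Repeat("?, ", len(columns)), ", ")
	stmt := fmt.Sprintf("INSERT INTO %s (%s) VALUES (%s)",
		table, strings.Join(columns, ", "), placeholders)
	_, err := tx.Exec(stmt, row...)
	return err
}
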
@@ -137,8 +168,18 @@ type Dump struct {
 var preClusteringTables = []string{
 	"certificates",
 	"config",
+	"containers",
+	"containers_config",
+	"containers_devices",
+	"containers_devices_config",
+	"containers_profiles",
+	"images",
+	"images_aliases",
+	"images_properties",
+	"images_source",
 	"networks",
 	"networks_config",
+	"profiles",
 	"storage_pools",
 	"storage_pools_config",
 	"storage_volumes",
diff --git a/lxd/db/migration_test.go b/lxd/db/migration_test.go
index 8d2b392a7..720a9dfb1 100644
--- a/lxd/db/migration_test.go
+++ b/lxd/db/migration_test.go
@@ -98,6 +98,7 @@ func newPreClusteringTx(t *testing.T) *sql.Tx {
 		preClusteringNodeSchema,
 		"INSERT INTO certificates VALUES (1, 'abcd:efgh', 1, 'foo', 'FOO')",
 		"INSERT INTO config VALUES(1, 'core.https_address', '1.2.3.4:666')",
+		"INSERT INTO images VALUES(1, 'abc', 'x.gz', 16, 0, 1, 0, 0, strftime('%d-%m-%Y', 'now'), 0, 0, 0)",
 		"INSERT INTO networks VALUES(1, 'lxcbr0', 'LXD bridge')",
 		"INSERT INTO networks_config VALUES(1, 1, 'ipv4.nat', 'true')",
 		"INSERT INTO storage_pools VALUES (1, 'default', 'dir', '')",
diff --git a/lxd/db/node/schema.go b/lxd/db/node/schema.go
index fd88a6ada..fcd18b658 100644
--- a/lxd/db/node/schema.go
+++ b/lxd/db/node/schema.go
@@ -12,103 +12,12 @@ CREATE TABLE config (
     value TEXT,
     UNIQUE (key)
 );
-CREATE TABLE "containers" (
-    id INTEGER primary key AUTOINCREMENT NOT NULL,
-    name VARCHAR(255) NOT NULL,
-    architecture INTEGER NOT NULL,
-    type INTEGER NOT NULL,
-    ephemeral INTEGER NOT NULL DEFAULT 0,
-    creation_date DATETIME NOT NULL DEFAULT 0,
-    stateful INTEGER NOT NULL DEFAULT 0,
-    last_use_date DATETIME,
-    description TEXT,
-    UNIQUE (name)
-);
-CREATE TABLE containers_config (
-    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    container_id INTEGER NOT NULL,
-    key VARCHAR(255) NOT NULL,
-    value TEXT,
-    FOREIGN KEY (container_id) REFERENCES containers (id) ON DELETE CASCADE,
-    UNIQUE (container_id, key)
-);
-CREATE TABLE containers_devices (
-    id INTEGER primary key AUTOINCREMENT NOT NULL,
-    container_id INTEGER NOT NULL,
-    name VARCHAR(255) NOT NULL,
-    type INTEGER NOT NULL default 0,
-    FOREIGN KEY (container_id) REFERENCES containers (id) ON DELETE CASCADE,
-    UNIQUE (container_id, name)
-);
-CREATE TABLE containers_devices_config (
-    id INTEGER primary key AUTOINCREMENT NOT NULL,
-    container_device_id INTEGER NOT NULL,
-    key VARCHAR(255) NOT NULL,
-    value TEXT,
-    FOREIGN KEY (container_device_id) REFERENCES containers_devices (id) ON DELETE CASCADE,
-    UNIQUE (container_device_id, key)
-);
-CREATE TABLE containers_profiles (
-    id INTEGER primary key AUTOINCREMENT NOT NULL,
-    container_id INTEGER NOT NULL,
-    profile_id INTEGER NOT NULL,
-    apply_order INTEGER NOT NULL default 0,
-    UNIQUE (container_id, profile_id),
-    FOREIGN KEY (container_id) REFERENCES containers(id) ON DELETE CASCADE,
-    FOREIGN KEY (profile_id) REFERENCES profiles(id) ON DELETE CASCADE
-);
-CREATE TABLE images (
-    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    fingerprint VARCHAR(255) NOT NULL,
-    filename VARCHAR(255) NOT NULL,
-    size INTEGER NOT NULL,
-    public INTEGER NOT NULL DEFAULT 0,
-    architecture INTEGER NOT NULL,
-    creation_date DATETIME,
-    expiry_date DATETIME,
-    upload_date DATETIME NOT NULL,
-    cached INTEGER NOT NULL DEFAULT 0,
-    last_use_date DATETIME,
-    auto_update INTEGER NOT NULL DEFAULT 0,
-    UNIQUE (fingerprint)
-);
-CREATE TABLE "images_aliases" (
-    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    name VARCHAR(255) NOT NULL,
-    image_id INTEGER NOT NULL,
-    description TEXT,
-    FOREIGN KEY (image_id) REFERENCES images (id) ON DELETE CASCADE,
-    UNIQUE (name)
-);
-CREATE TABLE images_properties (
-    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    image_id INTEGER NOT NULL,
-    type INTEGER NOT NULL,
-    key VARCHAR(255) NOT NULL,
-    value TEXT,
-    FOREIGN KEY (image_id) REFERENCES images (id) ON DELETE CASCADE
-);
-CREATE TABLE images_source (
-    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    image_id INTEGER NOT NULL,
-    server TEXT NOT NULL,
-    protocol INTEGER NOT NULL,
-    certificate TEXT NOT NULL,
-    alias VARCHAR(255) NOT NULL,
-    FOREIGN KEY (image_id) REFERENCES images (id) ON DELETE CASCADE
-);
 CREATE TABLE patches (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
     name VARCHAR(255) NOT NULL,
     applied_at DATETIME NOT NULL,
     UNIQUE (name)
 );
-CREATE TABLE profiles (
-    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    name VARCHAR(255) NOT NULL,
-    description TEXT,
-    UNIQUE (name)
-);
 CREATE TABLE profiles_config (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
     profile_id INTEGER NOT NULL,
diff --git a/lxd/db/node/update.go b/lxd/db/node/update.go
index 1153ecf8f..d4ce9efea 100644
--- a/lxd/db/node/update.go
+++ b/lxd/db/node/update.go
@@ -119,8 +119,18 @@ CREATE TABLE raft_nodes (
 );
 DELETE FROM config WHERE NOT key='core.https_address';
 DROP TABLE certificates;
+DROP TABLE containers_devices_config;
+DROP TABLE containers_devices;
+DROP TABLE containers_config;
+DROP TABLE containers_profiles;
+DROP TABLE containers;
+DROP TABLE images_aliases;
+DROP TABLE images_properties;
+DROP TABLE images_source;
+DROP TABLE images;
 DROP TABLE networks_config;
 DROP TABLE networks;
+DROP TABLE profiles;
 DROP TABLE storage_volumes_config;
 DROP TABLE storage_volumes;
 DROP TABLE storage_pools_config;
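
One detail worth calling out in the DROP batch above: child tables referencing a parent through a FOREIGN KEY go first (containers_devices_config before containers_devices, everything before containers), so the whole batch runs cleanly in a single transaction. The same ordering, written out in the style of this file's update functions (the function name here is made up):

package node

import "database/sql"

// updateDropClusteredTablesExample shows the child-first drop ordering;
// the actual statements live in the update above.
func updateDropClusteredTablesExample(tx *sql.Tx) error {
	stmts := `
DROP TABLE containers_devices_config; -- references containers_devices
DROP TABLE containers_devices;        -- references containers
DROP TABLE containers_config;         -- references containers
DROP TABLE containers_profiles;       -- references containers and profiles
DROP TABLE containers;                -- parent, dropped last
`
	_, err := tx.Exec(stmts)
	return err
}
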
diff --git a/lxd/db/profiles.go b/lxd/db/profiles.go
index 61bbd386f..bddfb317c 100644
--- a/lxd/db/profiles.go
+++ b/lxd/db/profiles.go
@@ -11,12 +11,12 @@ import (
 )
 
 // Profiles returns a string list of profiles.
-func (n *Node) Profiles() ([]string, error) {
+func (c *Cluster) Profiles() ([]string, error) {
 	q := fmt.Sprintf("SELECT name FROM profiles")
 	inargs := []interface{}{}
 	var name string
 	outfmt := []interface{}{name}
-	result, err := queryScan(n.db, q, inargs, outfmt)
+	result, err := queryScan(c.db, q, inargs, outfmt)
 	if err != nil {
 		return []string{}, err
 	}
@@ -29,24 +29,24 @@ func (n *Node) Profiles() ([]string, error) {
 	return response, nil
 }
 
-func (n *Node) ProfileGet(name string) (int64, *api.Profile, error) {
+func (c *Cluster) ProfileGet(name string) (int64, *api.Profile, error) {
 	id := int64(-1)
 	description := sql.NullString{}
 
 	q := "SELECT id, description FROM profiles WHERE name=?"
 	arg1 := []interface{}{name}
 	arg2 := []interface{}{&id, &description}
-	err := dbQueryRowScan(n.db, q, arg1, arg2)
+	err := dbQueryRowScan(c.db, q, arg1, arg2)
 	if err != nil {
 		return -1, nil, err
 	}
 
-	config, err := n.ProfileConfig(name)
+	config, err := c.ProfileConfig(name)
 	if err != nil {
 		return -1, nil, err
 	}
 
-	devices, err := n.Devices(name, true)
+	devices, err := c.Devices(name, true)
 	if err != nil {
 		return -1, nil, err
 	}
@@ -62,10 +62,10 @@ func (n *Node) ProfileGet(name string) (int64, *api.Profile, error) {
 	return id, &profile, nil
 }
 
-func (n *Node) ProfileCreate(profile string, description string, config map[string]string,
+func (c *Cluster) ProfileCreate(profile string, description string, config map[string]string,
 	devices types.Devices) (int64, error) {
 
-	tx, err := begin(n.db)
+	tx, err := begin(c.db)
 	if err != nil {
 		return -1, err
 	}
@@ -100,15 +100,15 @@ func (n *Node) ProfileCreate(profile string, description string, config map[stri
 	return id, nil
 }
 
-func (n *Node) ProfileCreateDefault() error {
-	id, _, _ := n.ProfileGet("default")
+func (c *Cluster) ProfileCreateDefault() error {
+	id, _, _ := c.ProfileGet("default")
 
 	if id != -1 {
 		// default profile already exists
 		return nil
 	}
 
-	_, err := n.ProfileCreate("default", "Default LXD profile", map[string]string{}, types.Devices{})
+	_, err := c.ProfileCreate("default", "Default LXD profile", map[string]string{}, types.Devices{})
 	if err != nil {
 		return err
 	}
@@ -117,7 +117,7 @@ func (n *Node) ProfileCreateDefault() error {
 }
 
 // Get the profile configuration map from the DB
-func (n *Node) ProfileConfig(name string) (map[string]string, error) {
+func (c *Cluster) ProfileConfig(name string) (map[string]string, error) {
 	var key, value string
 	query := `
         SELECT
@@ -127,7 +127,7 @@ func (n *Node) ProfileConfig(name string) (map[string]string, error) {
 		WHERE name=?`
 	inargs := []interface{}{name}
 	outfmt := []interface{}{key, value}
-	results, err := queryScan(n.db, query, inargs, outfmt)
+	results, err := queryScan(c.db, query, inargs, outfmt)
 	if err != nil {
 		return nil, fmt.Errorf("Failed to get profile '%s'", name)
 	}
@@ -139,7 +139,7 @@ func (n *Node) ProfileConfig(name string) (map[string]string, error) {
 		 */
 		query := "SELECT id FROM profiles WHERE name=?"
 		var id int
-		results, err := queryScan(n.db, query, []interface{}{name}, []interface{}{id})
+		results, err := queryScan(c.db, query, []interface{}{name}, []interface{}{id})
 		if err != nil {
 			return nil, err
 		}
@@ -161,13 +161,13 @@ func (n *Node) ProfileConfig(name string) (map[string]string, error) {
 	return config, nil
 }
 
-func (n *Node) ProfileDelete(name string) error {
-	id, _, err := n.ProfileGet(name)
+func (c *Cluster) ProfileDelete(name string) error {
+	id, _, err := c.ProfileGet(name)
 	if err != nil {
 		return err
 	}
 
-	_, err = exec(n.db, "DELETE FROM profiles WHERE id=?", id)
+	_, err = exec(c.db, "DELETE FROM profiles WHERE id=?", id)
 	if err != nil {
 		return err
 	}
@@ -175,8 +175,8 @@ func (n *Node) ProfileDelete(name string) error {
 	return nil
 }
 
-func (n *Node) ProfileUpdate(name string, newName string) error {
-	tx, err := begin(n.db)
+func (c *Cluster) ProfileUpdate(name string, newName string) error {
+	tx, err := begin(c.db)
 	if err != nil {
 		return err
 	}
@@ -236,7 +236,7 @@ func ProfileConfigAdd(tx *sql.Tx, id int64, config map[string]string) error {
 	return nil
 }
 
-func (n *Node) ProfileContainersGet(profile string) ([]string, error) {
+func (c *Cluster) ProfileContainersGet(profile string) ([]string, error) {
 	q := `SELECT containers.name FROM containers JOIN containers_profiles
 		ON containers.id == containers_profiles.container_id
 		JOIN profiles ON containers_profiles.profile_id == profiles.id
@@ -247,7 +247,7 @@ func (n *Node) ProfileContainersGet(profile string) ([]string, error) {
 	var name string
 	outfmt := []interface{}{name}
 
-	output, err := queryScan(n.db, q, inargs, outfmt)
+	output, err := queryScan(c.db, q, inargs, outfmt)
 	if err != nil {
 		return results, err
 	}
@@ -259,13 +259,13 @@ func (n *Node) ProfileContainersGet(profile string) ([]string, error) {
 	return results, nil
 }
 
-func (n *Node) ProfileCleanupLeftover() error {
+func (c *Cluster) ProfileCleanupLeftover() error {
 	stmt := `
 DELETE FROM profiles_config WHERE profile_id NOT IN (SELECT id FROM profiles);
 DELETE FROM profiles_devices WHERE profile_id NOT IN (SELECT id FROM profiles);
 DELETE FROM profiles_devices_config WHERE profile_device_id NOT IN (SELECT id FROM profiles_devices);
 `
-	_, err := n.db.Exec(stmt)
+	_, err := c.db.Exec(stmt)
 	if err != nil {
 		return err
 	}
diff --git a/lxd/devices.go b/lxd/devices.go
index 186dd8a52..0f41b1431 100644
--- a/lxd/devices.go
+++ b/lxd/devices.go
@@ -604,7 +604,7 @@ func deviceTaskBalance(s *state.State) {
 	}
 
 	// Iterate through the containers
-	containers, err := s.Node.ContainersList(db.CTypeRegular)
+	containers, err := s.Cluster.ContainersList(db.CTypeRegular)
 	if err != nil {
 		logger.Error("problem loading containers list", log.Ctx{"err": err})
 		return
@@ -730,7 +730,7 @@ func deviceNetworkPriority(s *state.State, netif string) {
 		return
 	}
 
-	containers, err := s.Node.ContainersList(db.CTypeRegular)
+	containers, err := s.Cluster.ContainersList(db.CTypeRegular)
 	if err != nil {
 		return
 	}
@@ -761,7 +761,7 @@ func deviceNetworkPriority(s *state.State, netif string) {
 }
 
 func deviceUSBEvent(s *state.State, usb usbDevice) {
-	containers, err := s.Node.ContainersList(db.CTypeRegular)
+	containers, err := s.Cluster.ContainersList(db.CTypeRegular)
 	if err != nil {
 		logger.Error("problem loading containers list", log.Ctx{"err": err})
 		return
diff --git a/lxd/devlxd.go b/lxd/devlxd.go
index 0bbe3113c..503ae9489 100644
--- a/lxd/devlxd.go
+++ b/lxd/devlxd.go
@@ -449,7 +449,7 @@ func findContainerForPid(pid int32, d *Daemon) (container, error) {
 		return nil, err
 	}
 
-	containers, err := d.db.ContainersList(db.CTypeRegular)
+	containers, err := d.cluster.ContainersList(db.CTypeRegular)
 	if err != nil {
 		return nil, err
 	}
diff --git a/lxd/images.go b/lxd/images.go
index f14084fb7..1dba3e4a0 100644
--- a/lxd/images.go
+++ b/lxd/images.go
@@ -196,7 +196,7 @@ func imgPostContInfo(d *Daemon, r *http.Request, req api.ImagesPost, builddir st
 
 	info.Fingerprint = fmt.Sprintf("%x", sha256.Sum(nil))
 
-	_, _, err = d.db.ImageGet(info.Fingerprint, false, true)
+	_, _, err = d.cluster.ImageGet(info.Fingerprint, false, true)
 	if err == nil {
 		return nil, fmt.Errorf("The image already exists: %s", info.Fingerprint)
 	}
@@ -212,7 +212,7 @@ func imgPostContInfo(d *Daemon, r *http.Request, req api.ImagesPost, builddir st
 	info.Properties = req.Properties
 
 	// Create the database entry
-	err = d.db.ImageInsert(info.Fingerprint, info.Filename, info.Size, info.Public, info.AutoUpdate, info.Architecture, info.CreatedAt, info.ExpiresAt, info.Properties)
+	err = d.cluster.ImageInsert(info.Fingerprint, info.Filename, info.Size, info.Public, info.AutoUpdate, info.Architecture, info.CreatedAt, info.ExpiresAt, info.Properties)
 	if err != nil {
 		return nil, err
 	}
@@ -237,7 +237,7 @@ func imgPostRemoteInfo(d *Daemon, req api.ImagesPost, op *operation) (*api.Image
 		return nil, err
 	}
 
-	id, info, err := d.db.ImageGet(info.Fingerprint, false, true)
+	id, info, err := d.cluster.ImageGet(info.Fingerprint, false, true)
 	if err != nil {
 		return nil, err
 	}
@@ -249,7 +249,7 @@ func imgPostRemoteInfo(d *Daemon, req api.ImagesPost, op *operation) (*api.Image
 
 	// Update the DB record if needed
 	if req.Public || req.AutoUpdate || req.Filename != "" || len(req.Properties) > 0 {
-		err = d.db.ImageUpdate(id, req.Filename, info.Size, req.Public, req.AutoUpdate, info.Architecture, info.CreatedAt, info.ExpiresAt, info.Properties)
+		err = d.cluster.ImageUpdate(id, req.Filename, info.Size, req.Public, req.AutoUpdate, info.Architecture, info.CreatedAt, info.ExpiresAt, info.Properties)
 		if err != nil {
 			return nil, err
 		}
@@ -306,7 +306,7 @@ func imgPostURLInfo(d *Daemon, req api.ImagesPost, op *operation) (*api.Image, e
 		return nil, err
 	}
 
-	id, info, err := d.db.ImageGet(info.Fingerprint, false, false)
+	id, info, err := d.cluster.ImageGet(info.Fingerprint, false, false)
 	if err != nil {
 		return nil, err
 	}
@@ -317,7 +317,7 @@ func imgPostURLInfo(d *Daemon, req api.ImagesPost, op *operation) (*api.Image, e
 	}
 
 	if req.Public || req.AutoUpdate || req.Filename != "" || len(req.Properties) > 0 {
-		err = d.db.ImageUpdate(id, req.Filename, info.Size, req.Public, req.AutoUpdate, info.Architecture, info.CreatedAt, info.ExpiresAt, info.Properties)
+		err = d.cluster.ImageUpdate(id, req.Filename, info.Size, req.Public, req.AutoUpdate, info.Architecture, info.CreatedAt, info.ExpiresAt, info.Properties)
 		if err != nil {
 			return nil, err
 		}
@@ -514,7 +514,7 @@ func getImgPostInfo(d *Daemon, r *http.Request, builddir string, post *os.File)
 	}
 
 	// Check if the image already exists
-	exists, err := d.db.ImageExists(info.Fingerprint)
+	exists, err := d.cluster.ImageExists(info.Fingerprint)
 	if err != nil {
 		return nil, err
 	}
@@ -522,7 +522,7 @@ func getImgPostInfo(d *Daemon, r *http.Request, builddir string, post *os.File)
 		return nil, fmt.Errorf("Image with same fingerprint already exists")
 	}
 	// Create the database entry
-	err = d.db.ImageInsert(info.Fingerprint, info.Filename, info.Size, info.Public, info.AutoUpdate, info.Architecture, info.CreatedAt, info.ExpiresAt, info.Properties)
+	err = d.cluster.ImageInsert(info.Fingerprint, info.Filename, info.Size, info.Public, info.AutoUpdate, info.Architecture, info.CreatedAt, info.ExpiresAt, info.Properties)
 	if err != nil {
 		return nil, err
 	}
@@ -637,17 +637,17 @@ func imagesPost(d *Daemon, r *http.Request) Response {
 
 		// Apply any provided alias
 		for _, alias := range req.Aliases {
-			_, _, err := d.db.ImageAliasGet(alias.Name, true)
+			_, _, err := d.cluster.ImageAliasGet(alias.Name, true)
 			if err == nil {
 				return fmt.Errorf("Alias already exists: %s", alias.Name)
 			}
 
-			id, _, err := d.db.ImageGet(info.Fingerprint, false, false)
+			id, _, err := d.cluster.ImageGet(info.Fingerprint, false, false)
 			if err != nil {
 				return err
 			}
 
-			err = d.db.ImageAliasAdd(alias.Name, id, alias.Description)
+			err = d.cluster.ImageAliasAdd(alias.Name, id, alias.Description)
 			if err != nil {
 				return err
 			}
@@ -713,7 +713,7 @@ func getImageMetadata(fname string) (*api.ImageMetadata, error) {
 }
 
 func doImagesGet(d *Daemon, recursion bool, public bool) (interface{}, error) {
-	results, err := d.db.ImagesGet(public)
+	results, err := d.cluster.ImagesGet(public)
 	if err != nil {
 		return []string{}, err
 	}
@@ -726,7 +726,7 @@ func doImagesGet(d *Daemon, recursion bool, public bool) (interface{}, error) {
 			url := fmt.Sprintf("/%s/images/%s", version.APIVersion, name)
 			resultString[i] = url
 		} else {
-			image, response := doImageGet(d.db, name, public)
+			image, response := doImageGet(d.cluster, name, public)
 			if response != nil {
 				continue
 			}
@@ -780,14 +780,14 @@ func autoUpdateImagesTask(d *Daemon) (task.Func, task.Schedule) {
 func autoUpdateImages(ctx context.Context, d *Daemon) {
 	logger.Infof("Updating images")
 
-	images, err := d.db.ImagesGet(false)
+	images, err := d.cluster.ImagesGet(false)
 	if err != nil {
 		logger.Error("Unable to retrieve the list of images", log.Ctx{"err": err})
 		return
 	}
 
 	for _, fingerprint := range images {
-		id, info, err := d.db.ImageGet(fingerprint, false, true)
+		id, info, err := d.cluster.ImageGet(fingerprint, false, true)
 		if err != nil {
 			logger.Error("Error loading image", log.Ctx{"err": err, "fp": fingerprint})
 			continue
@@ -819,7 +819,7 @@ func autoUpdateImages(ctx context.Context, d *Daemon) {
 // Returns whether the image has been updated.
 func autoUpdateImage(d *Daemon, op *operation, id int, info *api.Image) error {
 	fingerprint := info.Fingerprint
-	_, source, err := d.db.ImageSourceGet(id)
+	_, source, err := d.cluster.ImageSourceGet(id)
 	if err != nil {
 		logger.Error("Error getting source image", log.Ctx{"err": err, "fp": fingerprint})
 		return err
@@ -834,7 +834,7 @@ func autoUpdateImage(d *Daemon, op *operation, id int, info *api.Image) error {
 	}
 
 	// Translate the IDs to poolNames.
-	poolNames, err := d.db.ImageGetPoolNamesFromIDs(poolIDs)
+	poolNames, err := d.cluster.ImageGetPoolNamesFromIDs(poolIDs)
 	if err != nil {
 		logger.Error("Error getting image pools", log.Ctx{"err": err, "fp": fingerprint})
 		return err
@@ -873,27 +873,27 @@ func autoUpdateImage(d *Daemon, op *operation, id int, info *api.Image) error {
 			continue
 		}
 
-		newId, _, err := d.db.ImageGet(hash, false, true)
+		newId, _, err := d.cluster.ImageGet(hash, false, true)
 		if err != nil {
 			logger.Error("Error loading image", log.Ctx{"err": err, "fp": hash})
 			continue
 		}
 
 		if info.Cached {
-			err = d.db.ImageLastAccessInit(hash)
+			err = d.cluster.ImageLastAccessInit(hash)
 			if err != nil {
 				logger.Error("Error setting cached flag", log.Ctx{"err": err, "fp": hash})
 				continue
 			}
 		}
 
-		err = d.db.ImageLastAccessUpdate(hash, info.LastUsedAt)
+		err = d.cluster.ImageLastAccessUpdate(hash, info.LastUsedAt)
 		if err != nil {
 			logger.Error("Error setting last use date", log.Ctx{"err": err, "fp": hash})
 			continue
 		}
 
-		err = d.db.ImageAliasesMove(id, newId)
+		err = d.cluster.ImageAliasesMove(id, newId)
 		if err != nil {
 			logger.Error("Error moving aliases", log.Ctx{"err": err, "fp": hash})
 			continue
@@ -934,7 +934,7 @@ func autoUpdateImage(d *Daemon, op *operation, id int, info *api.Image) error {
 	}
 
 	// Remove the database entry for the image.
-	if err = d.db.ImageDelete(id); err != nil {
+	if err = d.cluster.ImageDelete(id); err != nil {
 		logger.Debugf("Error deleting image from database %s: %s", fname, err)
 	}
 
@@ -990,7 +990,7 @@ func pruneExpiredImages(ctx context.Context, d *Daemon) {
 	}
 
 	// Get the list of expired images.
-	images, err := d.db.ImagesGetExpired(expiry)
+	images, err := d.cluster.ImagesGetExpired(expiry)
 	if err != nil {
 		logger.Error("Unable to retrieve the list of expired images", log.Ctx{"err": err})
 		return
@@ -1015,7 +1015,7 @@ func pruneExpiredImages(ctx context.Context, d *Daemon) {
 		}
 
 		// Translate the IDs to poolNames.
-		poolNames, err := d.db.ImageGetPoolNamesFromIDs(poolIDs)
+		poolNames, err := d.cluster.ImageGetPoolNamesFromIDs(poolIDs)
 		if err != nil {
 			continue
 		}
@@ -1046,13 +1046,13 @@ func pruneExpiredImages(ctx context.Context, d *Daemon) {
 			}
 		}
 
-		imgID, _, err := d.db.ImageGet(fp, false, false)
+		imgID, _, err := d.cluster.ImageGet(fp, false, false)
 		if err != nil {
 			logger.Debugf("Error retrieving image info %s: %s", fp, err)
 		}
 
 		// Remove the database entry for the image.
-		if err = d.db.ImageDelete(imgID); err != nil {
+		if err = d.cluster.ImageDelete(imgID); err != nil {
 			logger.Debugf("Error deleting image %s from database: %s", fp, err)
 		}
 	}
@@ -1082,7 +1082,7 @@ func imageDelete(d *Daemon, r *http.Request) Response {
 	deleteFromAllPools := func() error {
 		// Use the fingerprint we received in a LIKE query and use the full
 		// fingerprint we receive from the database in all further queries.
-		imgID, imgInfo, err := d.db.ImageGet(fingerprint, false, false)
+		imgID, imgInfo, err := d.cluster.ImageGet(fingerprint, false, false)
 		if err != nil {
 			return err
 		}
@@ -1092,7 +1092,7 @@ func imageDelete(d *Daemon, r *http.Request) Response {
 			return err
 		}
 
-		pools, err := d.db.ImageGetPoolNamesFromIDs(poolIDs)
+		pools, err := d.cluster.ImageGetPoolNamesFromIDs(poolIDs)
 		if err != nil {
 			return err
 		}
@@ -1123,7 +1123,7 @@ func imageDelete(d *Daemon, r *http.Request) Response {
 		}
 
 		// Remove the database entry for the image.
-		return d.db.ImageDelete(imgID)
+		return d.cluster.ImageDelete(imgID)
 	}
 
 	rmimg := func(op *operation) error {
@@ -1141,7 +1141,7 @@ func imageDelete(d *Daemon, r *http.Request) Response {
 	return OperationResponse(op)
 }
 
-func doImageGet(db *db.Node, fingerprint string, public bool) (*api.Image, Response) {
+func doImageGet(db *db.Cluster, fingerprint string, public bool) (*api.Image, Response) {
 	_, imgInfo, err := db.ImageGet(fingerprint, public, false)
 	if err != nil {
 		return nil, SmartError(err)
@@ -1185,7 +1185,7 @@ func imageGet(d *Daemon, r *http.Request) Response {
 	public := d.checkTrustedClient(r) != nil
 	secret := r.FormValue("secret")
 
-	info, response := doImageGet(d.db, fingerprint, false)
+	info, response := doImageGet(d.cluster, fingerprint, false)
 	if response != nil {
 		return response
 	}
@@ -1201,7 +1201,7 @@ func imageGet(d *Daemon, r *http.Request) Response {
 func imagePut(d *Daemon, r *http.Request) Response {
 	// Get current value
 	fingerprint := mux.Vars(r)["fingerprint"]
-	id, info, err := d.db.ImageGet(fingerprint, false, false)
+	id, info, err := d.cluster.ImageGet(fingerprint, false, false)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -1218,7 +1218,7 @@ func imagePut(d *Daemon, r *http.Request) Response {
 		return BadRequest(err)
 	}
 
-	err = d.db.ImageUpdate(id, info.Filename, info.Size, req.Public, req.AutoUpdate, info.Architecture, info.CreatedAt, info.ExpiresAt, req.Properties)
+	err = d.cluster.ImageUpdate(id, info.Filename, info.Size, req.Public, req.AutoUpdate, info.Architecture, info.CreatedAt, info.ExpiresAt, req.Properties)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -1229,7 +1229,7 @@ func imagePut(d *Daemon, r *http.Request) Response {
 func imagePatch(d *Daemon, r *http.Request) Response {
 	// Get current value
 	fingerprint := mux.Vars(r)["fingerprint"]
-	id, info, err := d.db.ImageGet(fingerprint, false, false)
+	id, info, err := d.cluster.ImageGet(fingerprint, false, false)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -1284,7 +1284,7 @@ func imagePatch(d *Daemon, r *http.Request) Response {
 		info.Properties = properties
 	}
 
-	err = d.db.ImageUpdate(id, info.Filename, info.Size, info.Public, info.AutoUpdate, info.Architecture, info.CreatedAt, info.ExpiresAt, info.Properties)
+	err = d.cluster.ImageUpdate(id, info.Filename, info.Size, info.Public, info.AutoUpdate, info.Architecture, info.CreatedAt, info.ExpiresAt, info.Properties)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -1305,17 +1305,17 @@ func aliasesPost(d *Daemon, r *http.Request) Response {
 	}
 
 	// This is just to see if the alias name already exists.
-	_, _, err := d.db.ImageAliasGet(req.Name, true)
+	_, _, err := d.cluster.ImageAliasGet(req.Name, true)
 	if err == nil {
 		return Conflict
 	}
 
-	id, _, err := d.db.ImageGet(req.Target, false, false)
+	id, _, err := d.cluster.ImageGet(req.Target, false, false)
 	if err != nil {
 		return SmartError(err)
 	}
 
-	err = d.db.ImageAliasAdd(req.Name, id, req.Description)
+	err = d.cluster.ImageAliasAdd(req.Name, id, req.Description)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -1326,7 +1326,7 @@ func aliasesPost(d *Daemon, r *http.Request) Response {
 func aliasesGet(d *Daemon, r *http.Request) Response {
 	recursion := util.IsRecursionRequest(r)
 
-	names, err := d.db.ImageAliasesGet()
+	names, err := d.cluster.ImageAliasesGet()
 	if err != nil {
 		return BadRequest(err)
 	}
@@ -1338,7 +1338,7 @@ func aliasesGet(d *Daemon, r *http.Request) Response {
 			responseStr = append(responseStr, url)
 
 		} else {
-			_, alias, err := d.db.ImageAliasGet(name, d.checkTrustedClient(r) == nil)
+			_, alias, err := d.cluster.ImageAliasGet(name, d.checkTrustedClient(r) == nil)
 			if err != nil {
 				continue
 			}
@@ -1356,7 +1356,7 @@ func aliasesGet(d *Daemon, r *http.Request) Response {
 func aliasGet(d *Daemon, r *http.Request) Response {
 	name := mux.Vars(r)["name"]
 
-	_, alias, err := d.db.ImageAliasGet(name, d.checkTrustedClient(r) == nil)
+	_, alias, err := d.cluster.ImageAliasGet(name, d.checkTrustedClient(r) == nil)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -1366,12 +1366,12 @@ func aliasGet(d *Daemon, r *http.Request) Response {
 
 func aliasDelete(d *Daemon, r *http.Request) Response {
 	name := mux.Vars(r)["name"]
-	_, _, err := d.db.ImageAliasGet(name, true)
+	_, _, err := d.cluster.ImageAliasGet(name, true)
 	if err != nil {
 		return SmartError(err)
 	}
 
-	err = d.db.ImageAliasDelete(name)
+	err = d.cluster.ImageAliasDelete(name)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -1382,7 +1382,7 @@ func aliasDelete(d *Daemon, r *http.Request) Response {
 func aliasPut(d *Daemon, r *http.Request) Response {
 	// Get current value
 	name := mux.Vars(r)["name"]
-	id, alias, err := d.db.ImageAliasGet(name, true)
+	id, alias, err := d.cluster.ImageAliasGet(name, true)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -1402,12 +1402,12 @@ func aliasPut(d *Daemon, r *http.Request) Response {
 		return BadRequest(fmt.Errorf("The target field is required"))
 	}
 
-	imageId, _, err := d.db.ImageGet(req.Target, false, false)
+	imageId, _, err := d.cluster.ImageGet(req.Target, false, false)
 	if err != nil {
 		return SmartError(err)
 	}
 
-	err = d.db.ImageAliasUpdate(id, imageId, req.Description)
+	err = d.cluster.ImageAliasUpdate(id, imageId, req.Description)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -1418,7 +1418,7 @@ func aliasPut(d *Daemon, r *http.Request) Response {
 func aliasPatch(d *Daemon, r *http.Request) Response {
 	// Get current value
 	name := mux.Vars(r)["name"]
-	id, alias, err := d.db.ImageAliasGet(name, true)
+	id, alias, err := d.cluster.ImageAliasGet(name, true)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -1454,12 +1454,12 @@ func aliasPatch(d *Daemon, r *http.Request) Response {
 		alias.Description = description
 	}
 
-	imageId, _, err := d.db.ImageGet(alias.Target, false, false)
+	imageId, _, err := d.cluster.ImageGet(alias.Target, false, false)
 	if err != nil {
 		return SmartError(err)
 	}
 
-	err = d.db.ImageAliasUpdate(id, imageId, alias.Description)
+	err = d.cluster.ImageAliasUpdate(id, imageId, alias.Description)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -1476,17 +1476,17 @@ func aliasPost(d *Daemon, r *http.Request) Response {
 	}
 
 	// Check that the name isn't already in use
-	id, _, _ := d.db.ImageAliasGet(req.Name, true)
+	id, _, _ := d.cluster.ImageAliasGet(req.Name, true)
 	if id > 0 {
 		return Conflict
 	}
 
-	id, _, err := d.db.ImageAliasGet(name, true)
+	id, _, err := d.cluster.ImageAliasGet(name, true)
 	if err != nil {
 		return SmartError(err)
 	}
 
-	err = d.db.ImageAliasRename(id, req.Name)
+	err = d.cluster.ImageAliasRename(id, req.Name)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -1500,7 +1500,7 @@ func imageExport(d *Daemon, r *http.Request) Response {
 	public := d.checkTrustedClient(r) != nil
 	secret := r.FormValue("secret")
 
-	_, imgInfo, err := d.db.ImageGet(fingerprint, false, false)
+	_, imgInfo, err := d.cluster.ImageGet(fingerprint, false, false)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -1550,7 +1550,7 @@ func imageExport(d *Daemon, r *http.Request) Response {
 
 func imageSecret(d *Daemon, r *http.Request) Response {
 	fingerprint := mux.Vars(r)["fingerprint"]
-	_, imgInfo, err := d.db.ImageGet(fingerprint, false, false)
+	_, imgInfo, err := d.cluster.ImageGet(fingerprint, false, false)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -1577,7 +1577,7 @@ func imageSecret(d *Daemon, r *http.Request) Response {
 
 func imageRefresh(d *Daemon, r *http.Request) Response {
 	fingerprint := mux.Vars(r)["fingerprint"]
-	imageId, imageInfo, err := d.db.ImageGet(fingerprint, false, false)
+	imageId, imageInfo, err := d.cluster.ImageGet(fingerprint, false, false)
 	if err != nil {
 		return SmartError(err)
 	}
diff --git a/lxd/logging.go b/lxd/logging.go
index 8a0856f13..3408cd683 100644
--- a/lxd/logging.go
+++ b/lxd/logging.go
@@ -41,7 +41,7 @@ func expireLogs(ctx context.Context, state *state.State) error {
 	var containers []string
 	ch := make(chan struct{})
 	go func() {
-		containers, err = state.Node.ContainersList(db.CTypeRegular)
+		containers, err = state.Cluster.ContainersList(db.CTypeRegular)
 		ch <- struct{}{}
 	}()
 	select {
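
expireLogs keeps its goroutine-plus-channel wrapper around the (now cluster-backed) ContainersList call so the surrounding select can honour context cancellation. A generic sketch of that pattern with a stand-in for the database call (fetchWithContext is a made-up name; the buffered channel is a small deviation from the code above so the goroutine can exit even after a timeout):

package main

import (
	"context"
	"fmt"
	"time"
)

// fetchWithContext runs fetch in a goroutine and waits for either the
// result or context cancellation, whichever comes first.
func fetchWithContext(ctx context.Context, fetch func() ([]string, error)) ([]string, error) {
	var result []string
	var err error
	ch := make(chan struct{}, 1) // buffered: the goroutine never blocks on send
	go func() {
		result, err = fetch()
		ch <- struct{}{}
	}()
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	case <-ch:
		return result, err
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	names, err := fetchWithContext(ctx, func() ([]string, error) {
		return []string{"c1", "c2"}, nil // stand-in for ContainersList
	})
	fmt.Println(names, err)
}
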
diff --git a/lxd/main_activateifneeded.go b/lxd/main_activateifneeded.go
index 610103bc3..0cc37ae70 100644
--- a/lxd/main_activateifneeded.go
+++ b/lxd/main_activateifneeded.go
@@ -6,14 +6,20 @@ import (
 	"os"
 	"path/filepath"
 
+	"github.com/CanonicalLtd/go-sqlite3x"
 	"github.com/lxc/lxd/client"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/node"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/idmap"
 	"github.com/lxc/lxd/shared/logger"
+	"github.com/mattn/go-sqlite3"
 )
 
+func init() {
+	sql.Register("dqlite_direct_access", &sqlite3.SQLiteDriver{ConnectHook: sqliteDirectAccess})
+}
+
 func cmdActivateIfNeeded(args *Args) error {
 	// Only root should run this
 	if os.Geteuid() != 0 {
@@ -56,14 +62,23 @@ func cmdActivateIfNeeded(args *Args) error {
 	}
 
 	// Look for auto-started or previously started containers
-	result, err := d.db.ContainersList(db.CTypeRegular)
+	path := filepath.Join(d.os.VarDir, "raft", "db.bin")
+	if !shared.PathExists(path) {
+		logger.Debugf("No DB, so no need to start the daemon now.")
+		return nil
+	}
+	sqldb, err = sql.Open("dqlite_direct_access", path+"?mode=ro")
 	if err != nil {
 		return err
 	}
 
+	d.cluster = db.ForLocalInspection(sqldb)
+	result, err := d.cluster.ContainersList(db.CTypeRegular)
+	if err != nil {
+		sqldb.Close()
+		return err
+	}
+
 	for _, name := range result {
 		c, err := containerLoadByName(d.State(), name)
 		if err != nil {
+			sqldb.Close()
 			return err
 		}
 
@@ -72,18 +87,45 @@ func cmdActivateIfNeeded(args *Args) error {
 		autoStart := config["boot.autostart"]
 
 		if c.IsRunning() {
+			sqldb.Close()
 			logger.Debugf("Daemon has running containers, activating...")
 			_, err := lxd.ConnectLXDUnix("", nil)
 			return err
 		}
 
 		if lastState == "RUNNING" || lastState == "Running" || shared.IsTrue(autoStart) {
+			sqldb.Close()
 			logger.Debugf("Daemon has auto-started containers, activating...")
 			_, err := lxd.ConnectLXDUnix("", nil)
 			return err
 		}
 	}
 
+	sqldb.Close()
 	logger.Debugf("No need to start the daemon now.")
 	return nil
 }
+
+// Configure the sqlite connection so that it's safe to access the
+// dqlite-managed sqlite file, even without setting up raft.
+func sqliteDirectAccess(conn *sqlite3.SQLiteConn) error {
+	// Ensure journal mode is set to WAL, as this is a requirement for
+	// replication.
+	err := sqlite3x.JournalModePragma(conn, sqlite3x.JournalWal)
+	if err != nil {
+		return err
+	}
+
+	// Ensure we don't truncate or checkpoint the WAL on exit, as this
+	// would bork replication, which must be in full control of the WAL
+	// file.
+	err = sqlite3x.JournalSizeLimitPragma(conn, -1)
+	if err != nil {
+		return err
+	}
+	err = sqlite3x.DatabaseNoCheckpointOnClose(conn)
+	if err != nil {
+		return err
+	}
+	return nil
+}
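
The sqliteDirectAccess hook is what makes the read-only peek at the raft-managed database safe: WAL stays on and this connection never truncates or checkpoints it. A standalone sketch of the same wiring (the driver name and path here are illustrative; whether db.bin is directly readable as sqlite depends on the dqlite layout this series sets up):

package main

import (
	"database/sql"
	"fmt"
	"log"

	"github.com/CanonicalLtd/go-sqlite3x"
	"github.com/mattn/go-sqlite3"
)

func init() {
	// Same hook as sqliteDirectAccess above: force WAL, no journal size
	// limit, no checkpoint on close.
	sql.Register("dqlite_direct_access_example", &sqlite3.SQLiteDriver{
		ConnectHook: func(conn *sqlite3.SQLiteConn) error {
			if err := sqlite3x.JournalModePragma(conn, sqlite3x.JournalWal); err != nil {
				return err
			}
			if err := sqlite3x.JournalSizeLimitPragma(conn, -1); err != nil {
				return err
			}
			return sqlite3x.DatabaseNoCheckpointOnClose(conn)
		},
	})
}

func main() {
	// The path mirrors the patch's layout; adjust for your LXD_DIR.
	db, err := sql.Open("dqlite_direct_access_example", "/var/lib/lxd/raft/db.bin?mode=ro")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var count int
	if err := db.QueryRow("SELECT count(*) FROM containers").Scan(&count); err != nil {
		log.Fatal(err)
	}
	fmt.Println("containers:", count)
}
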
diff --git a/lxd/main_sql.go b/lxd/main_sql.go
index e721633bb..f22f67375 100644
--- a/lxd/main_sql.go
+++ b/lxd/main_sql.go
@@ -35,7 +35,7 @@ func cmdSQL(args *Args) error {
 	if err != nil {
 		return err
 	}
-	if strings.HasPrefix(query, "SELECT") {
+	if strings.HasPrefix(strings.ToUpper(query), "SELECT") {
 		// Print results in tabular format
 		widths := make([]int, len(result.Columns))
 		for i, column := range result.Columns {
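
The strings.ToUpper change only affects which output path a query takes: lowercase selects now get the tabular formatting too.

package main

import (
	"fmt"
	"strings"
)

func main() {
	q := "select name from profiles"
	// Old check: a lowercase query missed the tabular path.
	fmt.Println(strings.HasPrefix(q, "SELECT")) // false
	// New check: case-insensitive prefix match.
	fmt.Println(strings.HasPrefix(strings.ToUpper(q), "SELECT")) // true
}
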
diff --git a/lxd/main_test.go b/lxd/main_test.go
index 4bed14446..0cf2ca693 100644
--- a/lxd/main_test.go
+++ b/lxd/main_test.go
@@ -82,12 +82,12 @@ func (suite *lxdTestSuite) SetupTest() {
 	devicesMap := map[string]map[string]string{}
 	devicesMap["root"] = rootDev
 
-	defaultID, _, err := suite.d.db.ProfileGet("default")
+	defaultID, _, err := suite.d.cluster.ProfileGet("default")
 	if err != nil {
 		suite.T().Fatalf("failed to get default profile: %v", err)
 	}
 
-	tx, err := suite.d.db.Begin()
+	tx, err := suite.d.cluster.Begin()
 	if err != nil {
 		suite.T().Fatalf("failed to begin transaction: %v", err)
 	}
diff --git a/lxd/networks.go b/lxd/networks.go
index 53a506782..80ae27867 100644
--- a/lxd/networks.go
+++ b/lxd/networks.go
@@ -190,7 +190,7 @@ func doNetworkGet(d *Daemon, name string) (api.Network, error) {
 	n.Config = map[string]string{}
 
 	// Look for containers using the interface
-	cts, err := d.db.ContainersList(db.CTypeRegular)
+	cts, err := d.cluster.ContainersList(db.CTypeRegular)
 	if err != nil {
 		return api.Network{}, err
 	}
@@ -482,7 +482,7 @@ func (n *network) IsRunning() bool {
 
 func (n *network) IsUsed() bool {
 	// Look for containers using the interface
-	cts, err := n.db.ContainersList(db.CTypeRegular)
+	cts, err := n.state.Cluster.ContainersList(db.CTypeRegular)
 	if err != nil {
 		return true
 	}
diff --git a/lxd/networks_utils.go b/lxd/networks_utils.go
index 1de47c57a..ccef0ed59 100644
--- a/lxd/networks_utils.go
+++ b/lxd/networks_utils.go
@@ -744,7 +744,7 @@ func networkUpdateStatic(s *state.State, networkName string) error {
 	defer networkStaticLock.Unlock()
 
 	// Get all the containers
-	containers, err := s.Node.ContainersList(db.CTypeRegular)
+	containers, err := s.Cluster.ContainersList(db.CTypeRegular)
 	if err != nil {
 		return err
 	}
diff --git a/lxd/patches.go b/lxd/patches.go
index 6373c85a5..43c84f6d1 100644
--- a/lxd/patches.go
+++ b/lxd/patches.go
@@ -107,11 +107,11 @@ func patchesApplyAll(d *Daemon) error {
 
 // Patches begin here
 func patchLeftoverProfileConfig(name string, d *Daemon) error {
-	return d.db.ProfileCleanupLeftover()
+	return d.cluster.ProfileCleanupLeftover()
 }
 
 func patchInvalidProfileNames(name string, d *Daemon) error {
-	profiles, err := d.db.Profiles()
+	profiles, err := d.cluster.Profiles()
 	if err != nil {
 		return err
 	}
@@ -119,7 +119,7 @@ func patchInvalidProfileNames(name string, d *Daemon) error {
 	for _, profile := range profiles {
 		if strings.Contains(profile, "/") || shared.StringInSlice(profile, []string{".", ".."}) {
 			logger.Info("Removing unreachable profile (invalid name)", log.Ctx{"name": profile})
-			err := d.db.ProfileDelete(profile)
+			err := d.cluster.ProfileDelete(profile)
 			if err != nil {
 				return err
 			}
@@ -208,25 +208,25 @@ func patchStorageApi(name string, d *Daemon) error {
 	// Check if this LXD instance currently has any containers, snapshots, or
 	// images configured. If so, we create a default storage pool in the
 	// database. Otherwise, the user will have to run LXD init.
-	cRegular, err := d.db.ContainersList(db.CTypeRegular)
+	cRegular, err := d.cluster.ContainersList(db.CTypeRegular)
 	if err != nil {
 		return err
 	}
 
 	// Get list of existing snapshots.
-	cSnapshots, err := d.db.ContainersList(db.CTypeSnapshot)
+	cSnapshots, err := d.cluster.ContainersList(db.CTypeSnapshot)
 	if err != nil {
 		return err
 	}
 
 	// Get list of existing public images.
-	imgPublic, err := d.db.ImagesGet(true)
+	imgPublic, err := d.cluster.ImagesGet(true)
 	if err != nil {
 		return err
 	}
 
 	// Get list of existing private images.
-	imgPrivate, err := d.db.ImagesGet(false)
+	imgPrivate, err := d.cluster.ImagesGet(false)
 	if err != nil {
 		return err
 	}
@@ -450,7 +450,7 @@ func upgradeFromStorageTypeBtrfs(name string, d *Daemon, defaultPoolName string,
 		}
 
 		// Check if we need to account for snapshots for this container.
-		ctSnapshots, err := d.db.ContainerGetSnapshots(ct)
+		ctSnapshots, err := d.cluster.ContainerGetSnapshots(ct)
 		if err != nil {
 			return err
 		}
@@ -1126,7 +1126,7 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
 		}
 
 		// Check if we need to account for snapshots for this container.
-		ctSnapshots, err := d.db.ContainerGetSnapshots(ct)
+		ctSnapshots, err := d.cluster.ContainerGetSnapshots(ct)
 		if err != nil {
 			return err
 		}
@@ -1572,7 +1572,7 @@ func upgradeFromStorageTypeZfs(name string, d *Daemon, defaultPoolName string, d
 		}
 
 		// Check if we need to account for snapshots for this container.
-		ctSnapshots, err := d.db.ContainerGetSnapshots(ct)
+		ctSnapshots, err := d.cluster.ContainerGetSnapshots(ct)
 		if err != nil {
 			logger.Errorf("Failed to query database")
 			return err
@@ -1715,10 +1715,10 @@ func updatePoolPropertyForAllObjects(d *Daemon, poolName string, allcontainers [
 	// appropriate device including a pool is added to the default profile
 	// or the user explicitly passes the pool the container's storage volume
 	// is supposed to be created on.
-	profiles, err := d.db.Profiles()
+	profiles, err := d.cluster.Profiles()
 	if err == nil {
 		for _, pName := range profiles {
-			pID, p, err := d.db.ProfileGet(pName)
+			pID, p, err := d.cluster.ProfileGet(pName)
 			if err != nil {
 				logger.Errorf("Could not query database: %s.", err)
 				return err
@@ -1878,13 +1878,13 @@ func patchStorageApiV1(name string, d *Daemon) error {
 		return nil
 	}
 
-	cRegular, err := d.db.ContainersList(db.CTypeRegular)
+	cRegular, err := d.cluster.ContainersList(db.CTypeRegular)
 	if err != nil {
 		return err
 	}
 
 	// Get list of existing snapshots.
-	cSnapshots, err := d.db.ContainersList(db.CTypeSnapshot)
+	cSnapshots, err := d.cluster.ContainersList(db.CTypeSnapshot)
 	if err != nil {
 		return err
 	}
@@ -1899,7 +1899,7 @@ func patchStorageApiV1(name string, d *Daemon) error {
 }
 
 func patchStorageApiDirCleanup(name string, d *Daemon) error {
-	fingerprints, err := d.db.ImagesGet(false)
+	fingerprints, err := d.cluster.ImagesGet(false)
 	if err != nil {
 		return err
 	}
@@ -2482,18 +2482,18 @@ func patchStorageApiDirBindMount(name string, d *Daemon) error {
 }
 
 func patchFixUploadedAt(name string, d *Daemon) error {
-	images, err := d.db.ImagesGet(false)
+	images, err := d.cluster.ImagesGet(false)
 	if err != nil {
 		return err
 	}
 
 	for _, fingerprint := range images {
-		id, image, err := d.db.ImageGet(fingerprint, false, true)
+		id, image, err := d.cluster.ImageGet(fingerprint, false, true)
 		if err != nil {
 			return err
 		}
 
-		err = d.db.ImageUploadedAt(id, image.UploadedAt)
+		err = d.cluster.ImageUploadedAt(id, image.UploadedAt)
 		if err != nil {
 			return err
 		}
@@ -2543,7 +2543,7 @@ func patchStorageApiCephSizeRemove(name string, d *Daemon) error {
 }
 
 func patchDevicesNewNamingScheme(name string, d *Daemon) error {
-	cts, err := d.db.ContainersList(db.CTypeRegular)
+	cts, err := d.cluster.ContainersList(db.CTypeRegular)
 	if err != nil {
 		logger.Errorf("Failed to retrieve containers from database")
 		return err
@@ -2733,7 +2733,7 @@ func patchUpdateFromV10(d *Daemon) error {
 }
 
 func patchUpdateFromV11(d *Daemon) error {
-	cNames, err := d.db.ContainersList(db.CTypeSnapshot)
+	cNames, err := d.cluster.ContainersList(db.CTypeSnapshot)
 	if err != nil {
 		return err
 	}
@@ -2804,7 +2804,7 @@ func patchUpdateFromV15(d *Daemon) error {
 	// munge all LVM-backed containers' LV names to match what is
 	// required for snapshot support
 
-	cNames, err := d.db.ContainersList(db.CTypeRegular)
+	cNames, err := d.cluster.ContainersList(db.CTypeRegular)
 	if err != nil {
 		return err
 	}
diff --git a/lxd/profiles.go b/lxd/profiles.go
index 6bafb47f0..561633881 100644
--- a/lxd/profiles.go
+++ b/lxd/profiles.go
@@ -23,7 +23,7 @@ import (
 
 /* This is used for both profiles post and profile put */
 func profilesGet(d *Daemon, r *http.Request) Response {
-	results, err := d.db.Profiles()
+	results, err := d.cluster.Profiles()
 	if err != nil {
 		return SmartError(err)
 	}
@@ -66,7 +66,7 @@ func profilesPost(d *Daemon, r *http.Request) Response {
 		return BadRequest(fmt.Errorf("No name provided"))
 	}
 
-	_, profile, _ := d.db.ProfileGet(req.Name)
+	_, profile, _ := d.cluster.ProfileGet(req.Name)
 	if profile != nil {
 		return BadRequest(fmt.Errorf("The profile already exists"))
 	}
@@ -90,7 +90,7 @@ func profilesPost(d *Daemon, r *http.Request) Response {
 	}
 
 	// Update DB entry
-	_, err = d.db.ProfileCreate(req.Name, req.Description, req.Config, req.Devices)
+	_, err = d.cluster.ProfileCreate(req.Name, req.Description, req.Config, req.Devices)
 	if err != nil {
 		return SmartError(
 			fmt.Errorf("Error inserting %s into database: %s", req.Name, err))
@@ -105,12 +105,12 @@ var profilesCmd = Command{
 	post: profilesPost}
 
 func doProfileGet(s *state.State, name string) (*api.Profile, error) {
-	_, profile, err := s.Node.ProfileGet(name)
+	_, profile, err := s.Cluster.ProfileGet(name)
 	if err != nil {
 		return nil, err
 	}
 
-	cts, err := s.Node.ProfileContainersGet(name)
+	cts, err := s.Cluster.ProfileContainersGet(name)
 	if err != nil {
 		return nil, err
 	}
@@ -139,7 +139,7 @@ func profileGet(d *Daemon, r *http.Request) Response {
 func getContainersWithProfile(s *state.State, profile string) []container {
 	results := []container{}
 
-	output, err := s.Node.ProfileContainersGet(profile)
+	output, err := s.Cluster.ProfileContainersGet(profile)
 	if err != nil {
 		return results
 	}
@@ -159,7 +159,7 @@ func getContainersWithProfile(s *state.State, profile string) []container {
 func profilePut(d *Daemon, r *http.Request) Response {
 	// Get the profile
 	name := mux.Vars(r)["name"]
-	id, profile, err := d.db.ProfileGet(name)
+	id, profile, err := d.cluster.ProfileGet(name)
 	if err != nil {
 		return SmartError(fmt.Errorf("Failed to retrieve profile='%s'", name))
 	}
@@ -182,7 +182,7 @@ func profilePut(d *Daemon, r *http.Request) Response {
 func profilePatch(d *Daemon, r *http.Request) Response {
 	// Get the profile
 	name := mux.Vars(r)["name"]
-	id, profile, err := d.db.ProfileGet(name)
+	id, profile, err := d.cluster.ProfileGet(name)
 	if err != nil {
 		return SmartError(fmt.Errorf("Failed to retrieve profile='%s'", name))
 	}
@@ -260,7 +260,7 @@ func profilePost(d *Daemon, r *http.Request) Response {
 	}
 
 	// Check that the name isn't already in use
-	id, _, _ := d.db.ProfileGet(req.Name)
+	id, _, _ := d.cluster.ProfileGet(req.Name)
 	if id > 0 {
 		return Conflict
 	}
@@ -273,7 +273,7 @@ func profilePost(d *Daemon, r *http.Request) Response {
 		return BadRequest(fmt.Errorf("Invalid profile name '%s'", req.Name))
 	}
 
-	err := d.db.ProfileUpdate(name, req.Name)
+	err := d.cluster.ProfileUpdate(name, req.Name)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -295,7 +295,7 @@ func profileDelete(d *Daemon, r *http.Request) Response {
 		return BadRequest(fmt.Errorf("Profile is currently in use"))
 	}
 
-	err = d.db.ProfileDelete(name)
+	err = d.cluster.ProfileDelete(name)
 	if err != nil {
 		return SmartError(err)
 	}
diff --git a/lxd/profiles_test.go b/lxd/profiles_test.go
index 2f864cee4..ab2b60b6a 100644
--- a/lxd/profiles_test.go
+++ b/lxd/profiles_test.go
@@ -1,53 +1,39 @@
 package main
 
 import (
-	"database/sql"
-	"io/ioutil"
-	"os"
 	"testing"
+
+	"github.com/lxc/lxd/lxd/db"
 )
 
 func Test_removing_a_profile_deletes_associated_configuration_entries(t *testing.T) {
-	var db *sql.DB
-	var err error
-
-	d := DefaultDaemon()
-	d.os.VarDir, err = ioutil.TempDir("", "lxd-test")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(d.os.VarDir)
-
-	_, err = initializeDbObject(d)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	db = d.db.DB()
+	cluster, cleanup := db.NewTestCluster(t)
+	defer cleanup()
+	db := cluster.DB()
 
 	// Insert a container and a related profile. Don't forget that the profile
 	// we insert is profile ID 2 (there is a default profile already).
 	statements := `
-    INSERT INTO containers (name, architecture, type) VALUES ('thename', 1, 1);
+    INSERT INTO containers (node_id, name, architecture, type) VALUES (1, 'thename', 1, 1);
     INSERT INTO profiles (name) VALUES ('theprofile');
     INSERT INTO containers_profiles (container_id, profile_id) VALUES (1, 2);
     INSERT INTO profiles_devices (name, profile_id) VALUES ('somename', 2);
     INSERT INTO profiles_config (key, value, profile_id) VALUES ('thekey', 'thevalue', 2);
     INSERT INTO profiles_devices_config (profile_device_id, key, value) VALUES (1, 'something', 'boring');`
 
-	_, err = db.Exec(statements)
+	_, err := db.Exec(statements)
 	if err != nil {
 		t.Fatal(err)
 	}
 
 	// Delete the profile we just created with dbapi.ProfileDelete
-	err = d.db.ProfileDelete("theprofile")
+	err = cluster.ProfileDelete("theprofile")
 	if err != nil {
 		t.Fatal(err)
 	}
 
 	// Make sure there are 0 profiles_devices entries left.
-	devices, err := d.db.Devices("theprofile", true)
+	devices, err := cluster.Devices("theprofile", true)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -56,7 +42,7 @@ func Test_removing_a_profile_deletes_associated_configuration_entries(t *testing
 	}
 
 	// Make sure there are 0 profiles_config entries left.
-	config, err := d.db.ProfileConfig("theprofile")
+	config, err := cluster.ProfileConfig("theprofile")
 	if err == nil {
 		t.Fatal("found the profile!")
 	}
diff --git a/lxd/profiles_utils.go b/lxd/profiles_utils.go
index e45670847..7b28e367e 100644
--- a/lxd/profiles_utils.go
+++ b/lxd/profiles_utils.go
@@ -38,7 +38,7 @@ func doProfileUpdate(d *Daemon, name string, id int64, profile *api.Profile, req
 			// Check what profile the device comes from
 			profiles := container.Profiles()
 			for i := len(profiles) - 1; i >= 0; i-- {
-				_, profile, err := d.db.ProfileGet(profiles[i])
+				_, profile, err := d.cluster.ProfileGet(profiles[i])
 				if err != nil {
 					return SmartError(err)
 				}
@@ -60,7 +60,7 @@ func doProfileUpdate(d *Daemon, name string, id int64, profile *api.Profile, req
 	}
 
 	// Update the database
-	tx, err := d.db.Begin()
+	tx, err := d.cluster.Begin()
 	if err != nil {
 		return SmartError(err)
 	}
diff --git a/lxd/storage_lvm_utils.go b/lxd/storage_lvm_utils.go
index 52878c689..08681d072 100644
--- a/lxd/storage_lvm_utils.go
+++ b/lxd/storage_lvm_utils.go
@@ -684,7 +684,7 @@ func storageLVMThinpoolExists(vgName string, poolName string) (bool, error) {
 func storageLVMGetThinPoolUsers(s *state.State) ([]string, error) {
 	results := []string{}
 
-	cNames, err := s.Node.ContainersList(db.CTypeRegular)
+	cNames, err := s.Cluster.ContainersList(db.CTypeRegular)
 	if err != nil {
 		return results, err
 	}
@@ -702,7 +702,7 @@ func storageLVMGetThinPoolUsers(s *state.State) ([]string, error) {
 		}
 	}
 
-	imageNames, err := s.Node.ImagesGet(false)
+	imageNames, err := s.Cluster.ImagesGet(false)
 	if err != nil {
 		return results, err
 	}
diff --git a/lxd/storage_pools.go b/lxd/storage_pools.go
index 7484a0eaf..9b609fb12 100644
--- a/lxd/storage_pools.go
+++ b/lxd/storage_pools.go
@@ -231,7 +231,7 @@ func storagePoolDelete(d *Daemon, r *http.Request) Response {
 	}
 
 	// Check if the storage pool is still referenced in any profiles.
-	profiles, err := profilesUsingPoolGetNames(d.db, poolName)
+	profiles, err := profilesUsingPoolGetNames(d.cluster, poolName)
 	if err != nil {
 		return SmartError(err)
 	}
diff --git a/lxd/storage_pools_utils.go b/lxd/storage_pools_utils.go
index 77476fcb1..5d2c044ac 100644
--- a/lxd/storage_pools_utils.go
+++ b/lxd/storage_pools_utils.go
@@ -88,7 +88,7 @@ func storagePoolUsedByGet(state *state.State, poolID int64, poolName string) ([]
 	}
 
 	// Retrieve all profiles that exist on this storage pool.
-	profiles, err := profilesUsingPoolGetNames(state.Node, poolName)
+	profiles, err := profilesUsingPoolGetNames(state.Cluster, poolName)
 
 	if err != nil {
 		return []string{}, err
@@ -128,7 +128,7 @@ func storagePoolUsedByGet(state *state.State, poolID int64, poolName string) ([]
 	return poolUsedBy, err
 }
 
-func profilesUsingPoolGetNames(db *db.Node, poolName string) ([]string, error) {
+func profilesUsingPoolGetNames(db *db.Cluster, poolName string) ([]string, error) {
 	usedBy := []string{}
 
 	profiles, err := db.Profiles()
diff --git a/lxd/storage_volumes_utils.go b/lxd/storage_volumes_utils.go
index ef691645a..2cfe05647 100644
--- a/lxd/storage_volumes_utils.go
+++ b/lxd/storage_volumes_utils.go
@@ -172,7 +172,7 @@ func storagePoolVolumeUpdate(state *state.State, poolName string, volumeName str
 
 func storagePoolVolumeUsedByContainersGet(s *state.State, volumeName string,
 	volumeTypeName string) ([]string, error) {
-	cts, err := s.Node.ContainersList(db.CTypeRegular)
+	cts, err := s.Cluster.ContainersList(db.CTypeRegular)
 	if err != nil {
 		return []string{}, err
 	}
@@ -233,7 +233,7 @@ func storagePoolVolumeUsedByGet(s *state.State, volumeName string, volumeTypeNam
 			fmt.Sprintf("/%s/containers/%s", version.APIVersion, ct))
 	}
 
-	profiles, err := profilesUsingPoolVolumeGetNames(s.Node, volumeName, volumeTypeName)
+	profiles, err := profilesUsingPoolVolumeGetNames(s.Cluster, volumeName, volumeTypeName)
 	if err != nil {
 		return []string{}, err
 	}
@@ -249,7 +249,7 @@ func storagePoolVolumeUsedByGet(s *state.State, volumeName string, volumeTypeNam
 	return volumeUsedBy, nil
 }
 
-func profilesUsingPoolVolumeGetNames(db *db.Node, volumeName string, volumeType string) ([]string, error) {
+func profilesUsingPoolVolumeGetNames(db *db.Cluster, volumeName string, volumeType string) ([]string, error) {
 	usedBy := []string{}
 
 	profiles, err := db.Profiles()
diff --git a/test/includes/lxd.sh b/test/includes/lxd.sh
index 15fc96190..eaa170d4e 100644
--- a/test/includes/lxd.sh
+++ b/test/includes/lxd.sh
@@ -181,26 +181,25 @@ kill_lxd() {
         check_empty "${daemon_dir}/shmounts/"
         check_empty "${daemon_dir}/snapshots/"
 
-        echo "==> Checking for leftover DB entries"
-        check_empty_table "${daemon_dir}/lxd.db" "containers"
-        check_empty_table "${daemon_dir}/lxd.db" "containers_config"
-        check_empty_table "${daemon_dir}/lxd.db" "containers_devices"
-        check_empty_table "${daemon_dir}/lxd.db" "containers_devices_config"
-        check_empty_table "${daemon_dir}/lxd.db" "containers_profiles"
-        check_empty_table "${daemon_dir}/lxd.db" "images"
-        check_empty_table "${daemon_dir}/lxd.db" "images_aliases"
-        check_empty_table "${daemon_dir}/lxd.db" "images_properties"
-        check_empty_table "${daemon_dir}/lxd.db" "images_source"
-        check_empty_table "${daemon_dir}/lxd.db" "profiles"
-        check_empty_table "${daemon_dir}/lxd.db" "profiles_config"
-        check_empty_table "${daemon_dir}/lxd.db" "profiles_devices"
-        check_empty_table "${daemon_dir}/lxd.db" "profiles_devices_config"
-
         echo "==> Checking for leftover cluster DB entries"
         # FIXME: we should not use the command line sqlite client, since it's
         #        not compatible with dqlite
+        check_empty_table "${daemon_dir}/raft/db.bin" "containers"
+        check_empty_table "${daemon_dir}/raft/db.bin" "containers_config"
+        check_empty_table "${daemon_dir}/raft/db.bin" "containers_devices"
+        check_empty_table "${daemon_dir}/raft/db.bin" "containers_devices_config"
+        check_empty_table "${daemon_dir}/raft/db.bin" "containers_profiles"
+        check_empty_table "${daemon_dir}/raft/db.bin" "images"
+        check_empty_table "${daemon_dir}/raft/db.bin" "images_aliases"
+        check_empty_table "${daemon_dir}/raft/db.bin" "images_properties"
+        check_empty_table "${daemon_dir}/raft/db.bin" "images_source"
+        check_empty_table "${daemon_dir}/raft/db.bin" "images_nodes"
         check_empty_table "${daemon_dir}/raft/db.bin" "networks"
         check_empty_table "${daemon_dir}/raft/db.bin" "networks_config"
+        check_empty_table "${daemon_dir}/raft/db.bin" "profiles"
+        check_empty_table "${daemon_dir}/raft/db.bin" "profiles_config"
+        check_empty_table "${daemon_dir}/raft/db.bin" "profiles_devices"
+        check_empty_table "${daemon_dir}/raft/db.bin" "profiles_devices_config"
         check_empty_table "${daemon_dir}/raft/db.bin" "storage_pools"
         check_empty_table "${daemon_dir}/raft/db.bin" "storage_pools_config"
         check_empty_table "${daemon_dir}/raft/db.bin" "storage_volumes"
diff --git a/test/suites/backup.sh b/test/suites/backup.sh
index 84b304836..0b14a0b83 100644
--- a/test/suites/backup.sh
+++ b/test/suites/backup.sh
@@ -17,7 +17,7 @@ test_container_import() {
     ! lxd import ctImport
     lxd import ctImport --force
     kill -9 "${pid}"
-    sqlite3 "${LXD_DIR}/lxd.db" "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport'"
+    lxd sql "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport'"
     lxd sql "PRAGMA foreign_keys=ON; DELETE FROM storage_volumes WHERE name='ctImport'"
     lxd import ctImport --force
     lxc start ctImport
@@ -39,7 +39,7 @@ test_container_import() {
     lxc start ctImport
     pid=$(lxc info ctImport | grep ^Pid | awk '{print $2}')
     kill -9 "${pid}"
-    sqlite3 "${LXD_DIR}/lxd.db" "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport'"
+    lxd sql "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport'"
     ! lxd import ctImport
     lxd import ctImport --force
     lxc info ctImport | grep snap0
@@ -51,7 +51,7 @@ test_container_import() {
     lxc start ctImport
     pid=$(lxc info ctImport | grep ^Pid | awk '{print $2}')
     kill -9 "${pid}"
-    sqlite3 "${LXD_DIR}/lxd.db" "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport/snap0'"
+    lxd sql "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport/snap0'"
     ! lxd import ctImport
     lxd import ctImport --force
     lxc info ctImport | grep snap0
@@ -63,8 +63,8 @@ test_container_import() {
     lxc start ctImport
     pid=$(lxc info ctImport | grep ^Pid | awk '{print $2}')
     kill -9 "${pid}"
-    sqlite3 "${LXD_DIR}/lxd.db" "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport'"
-    sqlite3 "${LXD_DIR}/lxd.db" "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport/snap0'"
+    lxd sql "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport'"
+    lxd sql "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport/snap0'"
     lxd sql "PRAGMA foreign_keys=ON; DELETE FROM storage_volumes WHERE name='ctImport'"
     ! lxd import ctImport
     lxd import ctImport --force
@@ -77,8 +77,8 @@ test_container_import() {
     lxc start ctImport
     pid=$(lxc info ctImport | grep ^Pid | awk '{print $2}')
     kill -9 "${pid}"
-    sqlite3 "${LXD_DIR}/lxd.db" "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport'"
-    sqlite3 "${LXD_DIR}/lxd.db" "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport/snap0'"
+    lxd sql "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport'"
+    lxd sql "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport/snap0'"
     lxd sql "PRAGMA foreign_keys=ON; DELETE FROM storage_volumes WHERE name='ctImport'"
     lxd sql "PRAGMA foreign_keys=ON; DELETE FROM storage_volumes WHERE name='ctImport/snap0'"
     lxd import ctImport
@@ -98,8 +98,8 @@ test_container_import() {
     fi
     pid=$(lxc info ctImport | grep ^Pid | awk '{print $2}')
     kill -9 "${pid}"
-    sqlite3 "${LXD_DIR}/lxd.db" "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport'"
-    sqlite3 "${LXD_DIR}/lxd.db" "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport/snap0'"
+    lxd sql "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport'"
+    lxd sql "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport/snap0'"
     lxd sql "PRAGMA foreign_keys=ON; DELETE FROM storage_volumes WHERE name='ctImport'"
     ! lxd import ctImport
     lxd import ctImport --force
diff --git a/test/suites/database_update.sh b/test/suites/database_update.sh
index b8b46301f..872308f20 100644
--- a/test/suites/database_update.sh
+++ b/test/suites/database_update.sh
@@ -9,12 +9,12 @@ test_database_update(){
   spawn_lxd "${LXD_MIGRATE_DIR}" true
 
   # Assert there are enough tables.
-  expected_tables=17
+  expected_tables=7
   tables=$(sqlite3 "${MIGRATE_DB}" ".dump" | grep -c "CREATE TABLE")
   [ "${tables}" -eq "${expected_tables}" ] || { echo "FAIL: Wrong number of tables after database migration. Found: ${tables}, expected ${expected_tables}"; false; }
 
   # Assert there are the expected number of "ON DELETE CASCADE" occurrences.
-  expected_cascades=11
+  expected_cascades=3
   cascades=$(sqlite3 "${MIGRATE_DB}" ".dump" | grep -c "ON DELETE CASCADE")
   [ "${cascades}" -eq "${expected_cascades}" ] || { echo "FAIL: Wrong number of ON DELETE CASCADE foreign keys. Found: ${cascades}, expected: ${expected_cascades}"; false; }
 
diff --git a/test/suites/image.sh b/test/suites/image.sh
index e36bb149e..fbb44ab8c 100644
--- a/test/suites/image.sh
+++ b/test/suites/image.sh
@@ -27,7 +27,7 @@ test_image_expiry() {
   lxc_remote image list l2: | grep -q "${fpbrief}"
 
   # Override the upload date
-  sqlite3 "${LXD2_DIR}/lxd.db" "UPDATE images SET last_use_date='$(date --rfc-3339=seconds -u -d "2 days ago")' WHERE fingerprint='${fp}'"
+  LXD_DIR="$LXD2_DIR" lxd sql "UPDATE images SET last_use_date='$(date --rfc-3339=seconds -u -d "2 days ago")' WHERE fingerprint='${fp}'" | grep -q "Rows affected: 1"
 
   # Trigger the expiry
   lxc_remote config set l2: images.remote_cache_expiry 1
diff --git a/test/suites/profiling.sh b/test/suites/profiling.sh
index 769a5580e..63859bd95 100644
--- a/test/suites/profiling.sh
+++ b/test/suites/profiling.sh
@@ -4,7 +4,7 @@ test_cpu_profiling() {
   spawn_lxd "${LXD3_DIR}" false --cpuprofile "${LXD3_DIR}/cpu.out"
   lxdpid=$(cat "${LXD3_DIR}/lxd.pid")
   kill -TERM "${lxdpid}"
-  wait "${lxdpid}" || true
+  wait "${lxdpid}"
   export PPROF_TMPDIR="${TEST_DIR}/pprof"
   echo top5 | go tool pprof "$(which lxd)" "${LXD3_DIR}/cpu.out"
   echo ""

From 22fe9c1c51b91b1600ad6acd5a561e9fd2573d9f Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 26 Oct 2017 18:17:54 +0000
Subject: [PATCH 064/227] Convert api.Cluster.TargetCert to string

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 client/interfaces.go    | 2 +-
 client/lxd_cluster.go   | 2 +-
 lxd/api_cluster.go      | 2 +-
 lxd/api_cluster_test.go | 6 +++---
 lxd/main_init.go        | 2 +-
 shared/api/cluster.go   | 2 +-
 6 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/client/interfaces.go b/client/interfaces.go
index 22ca2cc9f..c5744512b 100644
--- a/client/interfaces.go
+++ b/client/interfaces.go
@@ -165,7 +165,7 @@ type ContainerServer interface {
 	GetCluster(password string) (cluster *api.Cluster, err error)
 	BootstrapCluster(name string) (op *Operation, err error)
 	AcceptNode(targetPassword, name, address string, schema, api int) (info *api.ClusterNodeAccepted, err error)
-	JoinCluster(targetAddress, targetPassword string, targetCert []byte, name string) (op *Operation, err error)
+	JoinCluster(targetAddress, targetPassword, targetCert, name string) (op *Operation, err error)
 
 	// Internal functions (for internal use)
 	RawQuery(method string, path string, data interface{}, queryETag string) (resp *api.Response, ETag string, err error)
diff --git a/client/lxd_cluster.go b/client/lxd_cluster.go
index 7d153cbb5..20a107b8b 100644
--- a/client/lxd_cluster.go
+++ b/client/lxd_cluster.go
@@ -55,7 +55,7 @@ func (r *ProtocolLXD) AcceptNode(targetPassword, name, address string, schema, a
 }
 
 // JoinCluster requests to join an existing cluster.
-func (r *ProtocolLXD) JoinCluster(targetAddress, targetPassword string, targetCert []byte, name string) (*Operation, error) {
+func (r *ProtocolLXD) JoinCluster(targetAddress, targetPassword, targetCert, name string) (*Operation, error) {
 	cluster := api.ClusterPost{
 		TargetAddress:  targetAddress,
 		TargetPassword: targetPassword,
diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index 66641ef50..7d9d624c6 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -173,7 +173,7 @@ func clusterPostJoin(d *Daemon, req api.ClusterPost) Response {
 		}
 
 		// Update our TLS configuration using the returned cluster certificate.
-		err = util.WriteCert(d.os.VarDir, "cluster", req.TargetCert, info.PrivateKey, req.TargetCA)
+		err = util.WriteCert(d.os.VarDir, "cluster", []byte(req.TargetCert), info.PrivateKey, req.TargetCA)
 		if err != nil {
 			return errors.Wrap(err, "failed to save cluster certificate")
 		}
diff --git a/lxd/api_cluster_test.go b/lxd/api_cluster_test.go
index 02f5e7fc4..55ba594ac 100644
--- a/lxd/api_cluster_test.go
+++ b/lxd/api_cluster_test.go
@@ -48,7 +48,7 @@ func TestCluster_Join(t *testing.T) {
 
 	// Make the second node join the cluster.
 	address := daemons[0].endpoints.NetworkAddress()
-	cert := daemons[0].endpoints.NetworkPublicKey()
+	cert := string(daemons[0].endpoints.NetworkPublicKey())
 	client = f.ClientUnix(daemons[1])
 	op, err = client.JoinCluster(address, "sekret", cert, "rusp")
 	require.NoError(t, err)
@@ -102,7 +102,7 @@ func TestCluster_JoinWrongTrustPassword(t *testing.T) {
 
 	// Make the second node join the cluster.
 	address := daemons[0].endpoints.NetworkAddress()
-	cert := daemons[0].endpoints.NetworkPublicKey()
+	cert := string(daemons[0].endpoints.NetworkPublicKey())
 	client = f.ClientUnix(daemons[1])
 	op, err = client.JoinCluster(address, "noop", cert, "rusp")
 	require.NoError(t, err)
@@ -160,7 +160,7 @@ func (f *clusterFixture) FormCluster(daemons []*Daemon) {
 
 	// Make the other nodes join the cluster.
 	address := daemons[0].endpoints.NetworkAddress()
-	cert := daemons[0].endpoints.NetworkPublicKey()
+	cert := string(daemons[0].endpoints.NetworkPublicKey())
 	for i, daemon := range daemons[1:] {
 		client = f.ClientUnix(daemon)
 		op, err := client.JoinCluster(address, "sekret", cert, fmt.Sprintf("rusp-%d", i))
diff --git a/lxd/main_init.go b/lxd/main_init.go
index 1c36976d3..55e77e1da 100644
--- a/lxd/main_init.go
+++ b/lxd/main_init.go
@@ -262,7 +262,7 @@ func (cmd *CmdInit) fillDataWithClustering(data *cmdInitData, clustering *cmdIni
 	}
 	data.Cluster.Name = clustering.Name
 	data.Cluster.TargetAddress = clustering.TargetAddress
-	data.Cluster.TargetCert = clustering.TargetCert
+	data.Cluster.TargetCert = string(clustering.TargetCert)
 	data.Cluster.TargetPassword = clustering.TargetPassword
 }
 
diff --git a/shared/api/cluster.go b/shared/api/cluster.go
index 045411d64..61339f650 100644
--- a/shared/api/cluster.go
+++ b/shared/api/cluster.go
@@ -16,7 +16,7 @@ type ClusterPost struct {
 	Schema         int    `json:"schema" yaml:"schema"`
 	API            int    `json:"api" yaml:"api"`
 	TargetAddress  string `json:"target_address" yaml:"target_address"`
-	TargetCert     []byte `json:"target_cert" yaml:"target_cert"`
+	TargetCert     string `json:"target_cert" yaml:"target_cert"`
 	TargetCA       []byte `json:"target_ca" yaml:"target_ca"`
 	TargetPassword string `json:"target_password" yaml:"target_password"`
 }
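
For context, after this change a caller passes the PEM text of the target
node's certificate directly as a string. A minimal sketch (hypothetical
path and addresses; assumes a ContainerServer client c obtained via
lxd.ConnectLXDUnix and the usual ioutil import):

    // Read the target node's certificate as PEM text; JoinCluster now
    // takes it as a string rather than a []byte.
    certPEM, err := ioutil.ReadFile("/var/lib/lxd/server.crt")
    if err != nil {
        return err
    }
    op, err := c.JoinCluster("10.1.1.101:8443", "sekret", string(certPEM), "node2")
    if err != nil {
        return err
    }
    err = op.Wait()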

From 2529a380c3c9b1e42ac5de16a260ea6e9e1864ea Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 27 Oct 2017 12:42:37 +0000
Subject: [PATCH 065/227] Don't block on failed db queries when shutting down

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 client/connection.go   | 26 +++++++++++++++---------
 lxd/cluster/gateway.go | 55 ++++++++++++++++++++++++++++++++++++++++----------
 lxd/daemon.go          | 21 +++++++++++++------
 lxd/main_daemon.go     |  1 +
 lxd/main_shutdown.go   |  5 ++++-
 5 files changed, 80 insertions(+), 28 deletions(-)

diff --git a/client/connection.go b/client/connection.go
index c2e830cd9..27d64b7bf 100644
--- a/client/connection.go
+++ b/client/connection.go
@@ -45,6 +45,9 @@ type ConnectionArgs struct {
 
 	// Cookie jar
 	CookieJar http.CookieJar
+
+	// Skip automatic GetServer request upon connection
+	SkipGetServer bool
 }
 
 // ConnectLXD lets you connect to a remote LXD daemon over HTTPs.
@@ -97,13 +100,15 @@ func ConnectLXDUnix(path string, args *ConnectionArgs) (ContainerServer, error)
 	server.http = httpClient
 
 	// Test the connection and seed the server information
-	serverStatus, _, err := server.GetServer()
-	if err != nil {
-		return nil, err
-	}
+	if !args.SkipGetServer {
+		serverStatus, _, err := server.GetServer()
+		if err != nil {
+			return nil, err
+		}
 
-	// Record the server certificate
-	server.httpCertificate = serverStatus.Environment.Certificate
+		// Record the server certificate
+		server.httpCertificate = serverStatus.Environment.Certificate
+	}
 
 	return &server, nil
 }
@@ -184,10 +189,11 @@ func httpsLXD(url string, args *ConnectionArgs) (ContainerServer, error) {
 	}
 
 	// Test the connection and seed the server information
-	_, _, err = server.GetServer()
-	if err != nil {
-		return nil, err
+	if !args.SkipGetServer {
+		_, _, err := server.GetServer()
+		if err != nil {
+			return nil, err
+		}
 	}
-
 	return &server, nil
 }
diff --git a/lxd/cluster/gateway.go b/lxd/cluster/gateway.go
index 243ec27b4..64c891b02 100644
--- a/lxd/cluster/gateway.go
+++ b/lxd/cluster/gateway.go
@@ -31,10 +31,13 @@ import (
 // HandlerFuncs method returns and to access the dqlite cluster using the gRPC
 // dialer returned by the Dialer method.
 func NewGateway(db *db.Node, cert *shared.CertInfo, latency float64) (*Gateway, error) {
+	ctx, cancel := context.WithCancel(context.Background())
 	gateway := &Gateway{
 		db:      db,
 		cert:    cert,
 		latency: latency,
+		ctx:     ctx,
+		cancel:  cancel,
 	}
 
 	err := gateway.init()
@@ -69,6 +72,11 @@ type Gateway struct {
 	// database, to minimize the difference between code paths in
 	// clustering and non-clustering modes.
 	memoryDial func() (*grpc.ClientConn, error)
+
+	// Used when shutting down the daemon to cancel any ongoing gRPC
+	// dialing attempt.
+	ctx    context.Context
+	cancel context.CancelFunc
 }
 
 // HandlerFuncs returns the HTTP handlers that should be added to the REST API
@@ -146,10 +154,11 @@ func (g *Gateway) Dialer() grpcsql.Dialer {
 			return g.memoryDial()
 		}
 
-		// FIXME: timeout should be configurable
+		// TODO: should the timeout be configurable?
+		ctx, cancel := context.WithTimeout(g.ctx, 5*time.Second)
+		defer cancel()
 		var err error
-		remaining := 10 * time.Second
-		for remaining > 0 {
+		for {
 			// Network connection.
 			addresses, dbErr := g.cachedRaftNodes()
 			if dbErr != nil {
@@ -158,19 +167,34 @@ func (g *Gateway) Dialer() grpcsql.Dialer {
 
 			for _, address := range addresses {
 				var conn *grpc.ClientConn
-				conn, err = grpcNetworkDial(address, g.cert, time.Second)
+				conn, err = grpcNetworkDial(g.ctx, address, g.cert)
 				if err == nil {
 					return conn, nil
 				}
 				logger.Debugf("Failed to establish gRPC connection with %s: %v", address, err)
 			}
-			time.Sleep(250 * time.Millisecond)
-			remaining -= 250 * time.Millisecond
+			if ctx.Err() != nil {
+				return nil, ctx.Err()
+			}
+			select {
+			case <-time.After(250 * time.Millisecond):
+				continue
+			case <-ctx.Done():
+				return nil, ctx.Err()
+			}
 		}
-		return nil, err
 	}
 }
 
+// Kill is called by the daemon right before it shuts down and invokes
+// Shutdown(). It aborts any ongoing or new attempt to establish a SQL gRPC
+// connection with the dialer (typically used to run some pre-shutdown
+// queries).
+func (g *Gateway) Kill() {
+	logger.Debug("Cancel ongoing or future gRPC connection attempts")
+	g.cancel()
+}
+
 // Shutdown this gateway, stopping the gRPC server and possibly the raft factory.
 func (g *Gateway) Shutdown() error {
 	if g.server != nil {
@@ -276,16 +300,27 @@ func (g *Gateway) cachedRaftNodes() ([]string, error) {
 	return addresses, nil
 }
 
-func grpcNetworkDial(addr string, cert *shared.CertInfo, t time.Duration) (*grpc.ClientConn, error) {
+func grpcNetworkDial(ctx context.Context, addr string, cert *shared.CertInfo) (*grpc.ClientConn, error) {
 	config, err := tlsClientConfig(cert)
 	if err != nil {
 		return nil, err
 	}
 
+	// The whole attempt should not take more than a second. If the context
+	// gets cancelled, calling code will typically try another database
+	// node, in round-robin fashion.
+	ctx, cancel := context.WithTimeout(ctx, time.Second)
+	defer cancel()
+
 	// Make a probe HEAD request to check if the target node is the leader.
 	url := fmt.Sprintf("https://%s%s", addr, grpcEndpoint)
+	request, err := http.NewRequest("HEAD", url, nil)
+	if err != nil {
+		return nil, err
+	}
+	request = request.WithContext(ctx)
 	client := &http.Client{Transport: &http.Transport{TLSClientConfig: config}}
-	response, err := client.Head(url)
+	response, err := client.Do(request)
 	if err != nil {
 		return nil, err
 	}
@@ -293,8 +328,6 @@ func grpcNetworkDial(addr string, cert *shared.CertInfo, t time.Duration) (*grpc
 		return nil, fmt.Errorf(response.Status)
 	}
 
-	ctx, cancel := context.WithTimeout(context.Background(), t)
-	defer cancel()
 	options := []grpc.DialOption{
 		grpc.WithTransportCredentials(credentials.NewTLS(config)),
 	}
diff --git a/lxd/daemon.go b/lxd/daemon.go
index 962b42417..798fe462f 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -411,12 +411,6 @@ func (d *Daemon) init() error {
 		return errors.Wrap(err, "failed to fetch node address")
 	}
 
-	/* Open the cluster database */
-	d.cluster, err = db.OpenCluster("db.bin", d.gateway.Dialer(), address)
-	if err != nil {
-		return errors.Wrap(err, "failed to open cluster database")
-	}
-
 	/* Setup the web server */
 	config := &endpoints.Config{
 		Dir:                  d.os.VarDir,
@@ -431,6 +425,12 @@ func (d *Daemon) init() error {
 		return err
 	}
 
+	/* Open the cluster database */
+	d.cluster, err = db.OpenCluster("db.bin", d.gateway.Dialer(), address)
+	if err != nil {
+		return errors.Wrap(err, "failed to open cluster database")
+	}
+
 	/* Migrate the node local data to the cluster database, if needed */
 	if dump != nil {
 		logger.Infof("Migrating data from lxd.db to db.bin")
@@ -570,6 +570,15 @@ func (d *Daemon) numRunningContainers() (int, error) {
 	return count, nil
 }
 
+// Kill signals the daemon that we want to shut down, and that any work
+// initiated from this point (e.g. database queries over gRPC) should not be
+// retried in case of failure.
+func (d *Daemon) Kill() {
+	if d.gateway != nil {
+		d.gateway.Kill()
+	}
+}
+
 // Stop stops the shared daemon.
 func (d *Daemon) Stop() error {
 	errs := []error{}
diff --git a/lxd/main_daemon.go b/lxd/main_daemon.go
index 7b9d84372..a4520315e 100644
--- a/lxd/main_daemon.go
+++ b/lxd/main_daemon.go
@@ -66,6 +66,7 @@ func cmdDaemon(args *Args) error {
 
 	case <-d.shutdownChan:
 		logger.Infof("Asked to shutdown by API, shutting down containers.")
+		d.Kill()
 		containersShutdown(s)
 		networkShutdown(s)
 	}
diff --git a/lxd/main_shutdown.go b/lxd/main_shutdown.go
index 00654ff64..675ad647c 100644
--- a/lxd/main_shutdown.go
+++ b/lxd/main_shutdown.go
@@ -8,7 +8,10 @@ import (
 )
 
 func cmdShutdown(args *Args) error {
-	c, err := lxd.ConnectLXDUnix("", nil)
+	connArgs := &lxd.ConnectionArgs{
+		SkipGetServer: true,
+	}
+	c, err := lxd.ConnectLXDUnix("", connArgs)
 	if err != nil {
 		return err
 	}
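
The shutdown pattern above is worth noting: the gateway owns a long-lived
cancellable context, and each dial attempt derives its deadline from it, so
Kill() aborts both in-flight and future attempts. A standalone sketch of the
same idea (illustrative only, not code from this series; assumes the usual
context, net/http and time imports):

    type dialer struct {
        ctx    context.Context
        cancel context.CancelFunc
    }

    func newDialer() *dialer {
        ctx, cancel := context.WithCancel(context.Background())
        return &dialer{ctx: ctx, cancel: cancel}
    }

    // Kill aborts any in-flight attempt and makes future ones fail fast.
    func (d *dialer) Kill() { d.cancel() }

    func (d *dialer) probe(addr string) error {
        // Per-attempt deadline, parented to the long-lived context so
        // that Kill() cancels it as well.
        ctx, cancel := context.WithTimeout(d.ctx, time.Second)
        defer cancel()

        req, err := http.NewRequest("HEAD", "https://"+addr+"/", nil)
        if err != nil {
            return err
        }
        _, err = http.DefaultClient.Do(req.WithContext(ctx))
        return err
    }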

From ac0fae56845527868ce83f74fb9e3f9f968e1a10 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 26 Oct 2017 18:15:23 +0000
Subject: [PATCH 066/227] Basic clustering integration tests

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 test/includes/clustering.sh | 72 +++++++++++++++++++++++++++++++++++++++++++
 test/includes/lxd.sh        | 36 ++++++++++++++--------
 test/main.sh                |  3 ++
 test/suites/clustering.sh   | 75 +++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 174 insertions(+), 12 deletions(-)
 create mode 100644 test/includes/clustering.sh
 create mode 100644 test/suites/clustering.sh

diff --git a/test/includes/clustering.sh b/test/includes/clustering.sh
new file mode 100644
index 000000000..6ffda750c
--- /dev/null
+++ b/test/includes/clustering.sh
@@ -0,0 +1,72 @@
+# Test helper for clustering
+
+setup_clustering_bridge() {
+  name="br$$"
+
+  echo "==> Setup clustering bridge ${name}"
+
+  brctl addbr "${name}"
+  ip addr add 10.1.1.1/16 dev "${name}"
+  ip link set dev "${name}" up
+
+  iptables -t nat -A POSTROUTING -s 10.1.0.0/16 -d 0.0.0.0/0 -j MASQUERADE
+  echo 1 > /proc/sys/net/ipv4/ip_forward
+}
+
+teardown_clustering_bridge() {
+  name="br$$"
+
+  if brctl show | grep -q "${name}" ; then
+      echo "==> Teardown clustering bridge ${name}"
+      echo 0 > /proc/sys/net/ipv4/ip_forward
+      iptables -t nat -D POSTROUTING -s 10.1.0.0/16 -d 0.0.0.0/0 -j MASQUERADE
+      ip link set dev "${name}" down
+      ip addr del 10.1.1.1/16 dev "${name}"
+      brctl delbr "${name}"
+  fi
+}
+
+setup_clustering_netns() {
+  id="${1}"
+  shift
+
+  prefix="lxd$$"
+  ns="${prefix}${id}"
+
+  echo "==> Setup clustering netns ${ns}"
+
+  ip netns add "${ns}"
+
+  veth1="v${ns}1"
+  veth2="v${ns}2"
+
+  ip link add "${veth1}" type veth peer name "${veth2}"
+  ip link set "${veth2}" netns "${ns}"
+
+  bridge="br$$"
+  brctl addif "${bridge}" "${veth1}"
+
+  ip link set "${veth1}" up
+
+  ip netns exec "${ns}" ip link set dev lo up
+  ip netns exec "${ns}" ip link set dev "${veth2}" name eth0
+  ip netns exec "${ns}" ip link set eth0 up
+  ip netns exec "${ns}" ip addr add "10.1.1.10${id}/16" dev eth0
+  ip netns exec "${ns}" ip route add default via 10.1.1.1
+}
+
+teardown_clustering_netns() {
+  prefix="lxd$$"
+  bridge="br$$"
+  for ns in $(ip netns | grep "${prefix}" | cut -f 1 -d " ") ; do
+      echo "==> Teardown clustering netns ${ns}"
+      veth1="v${ns}1"
+      veth2="v${ns}2"
+      ip netns exec "${ns}" ip link set eth0 down
+      ip netns exec "${ns}" ip link set lo down
+      ip link set "${veth1}" down
+      brctl delif "${bridge}" "${veth1}"
+      ip link delete "${veth1}" type veth
+      ip netns delete "${ns}"
+  done
+}
diff --git a/test/includes/lxd.sh b/test/includes/lxd.sh
index eaa170d4e..71c0799a7 100644
--- a/test/includes/lxd.sh
+++ b/test/includes/lxd.sh
@@ -40,7 +40,11 @@ spawn_lxd() {
     echo "==> Spawning lxd in ${lxddir}"
     # shellcheck disable=SC2086
 
-    LD_LIBRARY_PATH="${sqlite}" LXD_DIR="${lxddir}" lxd --logfile "${lxddir}/lxd.log" "${DEBUG-}" "$@" 2>&1 &
+    if [ "${LXD_NETNS}" = "" ]; then
+	LD_LIBRARY_PATH="${sqlite}" LXD_DIR="${lxddir}" lxd --logfile "${lxddir}/lxd.log" "${DEBUG-}" "$@" 2>&1 &
+    else
+	LD_LIBRARY_PATH="${sqlite}" LXD_DIR="${lxddir}" ip netns exec "${LXD_NETNS}" lxd --logfile "${lxddir}/lxd.log" "${DEBUG-}" "$@" 2>&1 &
+    fi
     LXD_PID=$!
     echo "${LXD_PID}" > "${lxddir}/lxd.pid"
     # shellcheck disable=SC2153
@@ -50,15 +54,17 @@ spawn_lxd() {
     echo "==> Confirming lxd is responsive"
     LXD_DIR="${lxddir}" lxd waitready --timeout=300
 
-    echo "==> Binding to network"
-    # shellcheck disable=SC2034
-    for i in $(seq 10); do
-        addr="127.0.0.1:$(local_tcp_port)"
-        LXD_DIR="${lxddir}" lxc config set core.https_address "${addr}" || continue
-        echo "${addr}" > "${lxddir}/lxd.addr"
-        echo "==> Bound to ${addr}"
-        break
-    done
+    if [ "${LXD_NETNS}" = "" ]; then
+	echo "==> Binding to network"
+	# shellcheck disable=SC2034
+	for i in $(seq 10); do
+            addr="127.0.0.1:$(local_tcp_port)"
+            LXD_DIR="${lxddir}" lxc config set core.https_address "${addr}" || continue
+            echo "${addr}" > "${lxddir}/lxd.addr"
+            echo "==> Bound to ${addr}"
+            break
+	done
+    fi
 
     echo "==> Setting trust password"
     LXD_DIR="${lxddir}" lxc config set core.trust_password foo
@@ -66,8 +72,10 @@ spawn_lxd() {
         set -x
     fi
 
-    echo "==> Setting up networking"
-    LXD_DIR="${lxddir}" lxc profile device add default eth0 nic nictype=p2p name=eth0
+    if [ "${LXD_NETNS}" = "" ]; then
+	echo "==> Setting up networking"
+	LXD_DIR="${lxddir}" lxc profile device add default eth0 nic nictype=p2p name=eth0
+    fi
 
     if [ "${storage}" = true ]; then
         echo "==> Configuring storage backend"
@@ -289,4 +297,8 @@ cleanup_lxds() {
     wipe "$test_dir"
 
     umount_loops "$test_dir"
+
+    # Cleanup clustering networking, if any
+    teardown_clustering_netns
+    teardown_clustering_bridge
 }
diff --git a/test/main.sh b/test/main.sh
index bcf6418fc..a45399dd8 100755
--- a/test/main.sh
+++ b/test/main.sh
@@ -24,6 +24,9 @@ if [ -z "${LXD_BACKEND:-}" ]; then
     LXD_BACKEND="dir"
 fi
 
+# shellcheck disable=SC2034
+LXD_NETNS=""
+
 import_subdir_files() {
     test "$1"
     # shellcheck disable=SC2039
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
new file mode 100644
index 000000000..2197ea439
--- /dev/null
+++ b/test/suites/clustering.sh
@@ -0,0 +1,75 @@
+test_clustering() {
+  setup_clustering_bridge
+  prefix="lxd$$"
+
+  setup_clustering_netns 1
+  LXD_ONE_DIR=$(mktemp -d -p "${TEST_DIR}" XXX)
+  chmod +x "${LXD_ONE_DIR}"
+  ns="${prefix}1"
+  LXD_NETNS="${ns}" spawn_lxd "${LXD_ONE_DIR}" false
+  (
+    set -e
+    # shellcheck disable=SC2034
+    LXD_DIR=${LXD_ONE_DIR}
+
+  cat <<EOF | lxd init --preseed
+config:
+  core.trust_password: sekret
+  core.https_address: 10.1.1.101:8443
+  images.auto_update_interval: 15
+storage_pools:
+- name: data
+  driver: dir
+profiles:
+- name: default
+  devices:
+    root:
+      path: /
+      pool: data
+      type: disk
+cluster:
+  name: one
+EOF
+  )
+
+  # Add a newline at the end of each line. YAML has weird rules.
+  cert=$(sed ':a;N;$!ba;s/\n/\n\n/g' "${LXD_ONE_DIR}/server.crt")
+
+  setup_clustering_netns 2
+  LXD_TWO_DIR=$(mktemp -d -p "${TEST_DIR}" XXX)
+  chmod +x "${LXD_TWO_DIR}"
+  ns="${prefix}2"
+  LXD_NETNS="${ns}" spawn_lxd "${LXD_TWO_DIR}" false
+  (
+    set -e
+    # shellcheck disable=SC2034
+    LXD_DIR=${LXD_TWO_DIR}
+
+  cat <<EOF | lxd init --preseed
+config:
+  core.https_address: 10.1.1.102:8443
+  images.auto_update_interval: 15
+storage_pools:
+- name: data
+  driver: dir
+profiles:
+- name: default
+  devices:
+    root:
+      path: /
+      pool: data
+      type: disk
+cluster:
+  name: two
+  target_address: 10.1.1.101:8443
+  target_password: sekret
+  target_cert: "$cert"
+EOF
+  )
+
+  LXD_DIR="${LXD_TWO_DIR}" lxd shutdown
+  LXD_DIR="${LXD_ONE_DIR}" lxd shutdown
+  sleep 2
+  rm -f "${LXD_TWO_DIR}/unix.socket"
+  rm -f "${LXD_ONE_DIR}/unix.socket"
+}

From 2c58d4a8584447bc35e445b03d06e6d73d6091d9 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 27 Oct 2017 13:20:27 +0000
Subject: [PATCH 067/227] Retry database interactions if raft leadership
 changed

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/containers_get.go |  2 +-
 lxd/db/db.go          | 44 +++++++++++++++++++++++++-------------------
 2 files changed, 26 insertions(+), 20 deletions(-)

diff --git a/lxd/containers_get.go b/lxd/containers_get.go
index 29ac485ac..376190b86 100644
--- a/lxd/containers_get.go
+++ b/lxd/containers_get.go
@@ -19,7 +19,7 @@ func containersGet(d *Daemon, r *http.Request) Response {
 		if err == nil {
 			return SyncResponse(true, result)
 		}
-		if !db.IsDbLockedError(err) {
+		if !db.IsRetriableError(err) {
 			logger.Debugf("DBERR: containersGet: error %q", err)
 			return SmartError(err)
 		}
diff --git a/lxd/db/db.go b/lxd/db/db.go
index 0dd5e6c7e..59770699f 100644
--- a/lxd/db/db.go
+++ b/lxd/db/db.go
@@ -6,7 +6,7 @@ import (
 	"strings"
 	"time"
 
-	grpcsql "github.com/CanonicalLtd/go-grpc-sql"
+	"github.com/CanonicalLtd/go-grpc-sql"
 	"github.com/mattn/go-sqlite3"
 	"github.com/pkg/errors"
 
@@ -213,21 +213,15 @@ func (c *Cluster) Transaction(f func(*ClusterTx) error) error {
 
 	// FIXME: the retry loop should be configurable.
 	var err error
-	for i := 0; i < 10; i++ {
+	for i := 0; i < 20; i++ {
 		err = query.Transaction(c.db, func(tx *sql.Tx) error {
 			clusterTx.tx = tx
 			return f(clusterTx)
 		})
-		if err != nil {
-			// FIXME: we should bubble errors using errors.Wrap()
-			// instead, and check for sql.ErrBadConnection.
-			badConnection := strings.Contains(err.Error(), "bad connection")
-			leadershipLost := strings.Contains(err.Error(), "leadership lost")
-			if badConnection || leadershipLost {
-				logger.Debugf("Retry failed transaction")
-				time.Sleep(time.Second)
-				continue
-			}
+		if err != nil && IsRetriableError(err) {
+			logger.Debugf("Retry failed transaction")
+			time.Sleep(250 * time.Millisecond)
+			continue
 		}
 		break
 	}
@@ -277,7 +271,9 @@ func UpdateSchemasDotGo() error {
 	return nil
 }
 
-func IsDbLockedError(err error) bool {
+// IsRetriableError returns true if the given error might be transient and the
+// interaction can be safely retried.
+func IsRetriableError(err error) bool {
 	if err == nil {
 		return false
 	}
@@ -287,6 +283,16 @@ func IsDbLockedError(err error) bool {
 	if err.Error() == "database is locked" {
 		return true
 	}
+
+	// FIXME: we should bubble errors using errors.Wrap()
+	// instead, and check for errors.Cause(err) == driver.ErrBadConn.
+	if strings.Contains(err.Error(), "bad connection") {
+		return true
+	}
+	if strings.Contains(err.Error(), "leadership lost") {
+		return true
+	}
+
 	return false
 }
 
@@ -306,7 +312,7 @@ func begin(db *sql.DB) (*sql.Tx, error) {
 		if err == nil {
 			return tx, nil
 		}
-		if !IsDbLockedError(err) {
+		if !IsRetriableError(err) {
 			logger.Debugf("DbBegin: error %q", err)
 			return nil, err
 		}
@@ -324,7 +330,7 @@ func TxCommit(tx *sql.Tx) error {
 		if err == nil {
 			return nil
 		}
-		if !IsDbLockedError(err) {
+		if !IsRetriableError(err) {
 			logger.Debugf("Txcommit: error %q", err)
 			return err
 		}
@@ -345,7 +351,7 @@ func dbQueryRowScan(db *sql.DB, q string, args []interface{}, outargs []interfac
 		if isNoMatchError(err) {
 			return err
 		}
-		if !IsDbLockedError(err) {
+		if !IsRetriableError(err) {
 			return err
 		}
 		time.Sleep(30 * time.Millisecond)
@@ -362,7 +368,7 @@ func dbQuery(db *sql.DB, q string, args ...interface{}) (*sql.Rows, error) {
 		if err == nil {
 			return result, nil
 		}
-		if !IsDbLockedError(err) {
+		if !IsRetriableError(err) {
 			logger.Debugf("DbQuery: query %q error %q", q, err)
 			return nil, err
 		}
@@ -447,7 +453,7 @@ func queryScan(qi queryer, q string, inargs []interface{}, outfmt []interface{})
 		if err == nil {
 			return result, nil
 		}
-		if !IsDbLockedError(err) {
+		if !IsRetriableError(err) {
 			logger.Debugf("DbQuery: query %q error %q", q, err)
 			return nil, err
 		}
@@ -465,7 +471,7 @@ func exec(db *sql.DB, q string, args ...interface{}) (sql.Result, error) {
 		if err == nil {
 			return result, nil
 		}
-		if !IsDbLockedError(err) {
+		if !IsRetriableError(err) {
 			logger.Debugf("DbExec: query %q error %q", q, err)
 			return nil, err
 		}
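
For reference, the cleaner approach the FIXME above alludes to would look
roughly like this (a sketch using github.com/pkg/errors and
database/sql/driver, not code from this series):

    import (
        "database/sql/driver"

        "github.com/pkg/errors"
    )

    // isRetriable unwraps an error chain built with errors.Wrap and
    // compares against the sentinel that database/sql drivers return
    // for dead connections.
    func isRetriable(err error) bool {
        return errors.Cause(err) == driver.ErrBadConn
    }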

From d6f20d34693bb968cb7a0a3bd4a305d56e965a91 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 30 Oct 2017 09:35:21 +0000
Subject: [PATCH 068/227] Limit open db connections to 1, to match the former
 exclusive mode

The node-level database is opened with _txlock=exclusive, which
doesn't quite work with dqlite. However, limiting the number of open
connections to 1 has effectively the same semantics.
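
A minimal sketch of the idea (illustrative, not the actual OpenCluster
code; assumes the usual database/sql and go-sqlite3 imports):

    db, err := sql.Open("sqlite3", "lxd.db")
    if err != nil {
        return nil, err
    }
    // With a single connection, transactions serialize exactly as they
    // did under the former _txlock=exclusive mode.
    db.SetMaxOpenConns(1)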

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/db.go     | 1 +
 lxd/db/images.go | 5 +++--
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/lxd/db/db.go b/lxd/db/db.go
index 59770699f..97d1f9173 100644
--- a/lxd/db/db.go
+++ b/lxd/db/db.go
@@ -170,6 +170,7 @@ func OpenCluster(name string, dialer grpcsql.Dialer, address string) (*Cluster,
 	cluster := &Cluster{
 		db: db,
 	}
+	db.SetMaxOpenConns(1)
 
 	// Figure out the ID of this node.
 	err = cluster.Transaction(func(tx *ClusterTx) error {
diff --git a/lxd/db/images.go b/lxd/db/images.go
index e69e8acb7..e71ef07ac 100644
--- a/lxd/db/images.go
+++ b/lxd/db/images.go
@@ -440,14 +440,15 @@ func (c *Cluster) ImageUpdate(id int, fname string, sz int64, public bool, autoU
 		return err
 	}
 
-	stmt, err = tx.Prepare(`INSERT INTO images_properties (image_id, type, key, value) VALUES (?, ?, ?, ?)`)
+	stmt2, err := tx.Prepare(`INSERT INTO images_properties (image_id, type, key, value) VALUES (?, ?, ?, ?)`)
 	if err != nil {
 		tx.Rollback()
 		return err
 	}
+	defer stmt2.Close()
 
 	for key, value := range properties {
-		_, err = stmt.Exec(id, 0, key, value)
+		_, err = stmt2.Exec(id, 0, key, value)
 		if err != nil {
 			tx.Rollback()
 			return err

From 57d412e890eb864728c16c34d66c5604bc952d40 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 27 Oct 2017 13:21:25 +0000
Subject: [PATCH 069/227] Cluster notifications for network delete

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api.go                |  7 +++++++
 lxd/api_1.0.go            |  2 +-
 lxd/networks.go           | 41 +++++++++++++++++++++++++++++++++++------
 test/suites/clustering.sh | 29 +++++++++++++++++++++++++----
 4 files changed, 68 insertions(+), 11 deletions(-)

diff --git a/lxd/api.go b/lxd/api.go
index ba6285ce9..825874f1b 100644
--- a/lxd/api.go
+++ b/lxd/api.go
@@ -98,3 +98,10 @@ func setCORSHeaders(rw http.ResponseWriter, req *http.Request, config *cluster.C
 		rw.Header().Set("Access-Control-Allow-Credentials", "true")
 	}
 }
+
+// Return true if this an API request coming from a cluster node that is
+// notifying us of some user-initiated API request that needs some action to be
+// taken on this node as well.
+func isClusterNotification(r *http.Request) bool {
+	return r.Header.Get("User-Agent") == "lxd-cluster-notifier"
+}
diff --git a/lxd/api_1.0.go b/lxd/api_1.0.go
index 7a26e2591..1a1cbf561 100644
--- a/lxd/api_1.0.go
+++ b/lxd/api_1.0.go
@@ -174,7 +174,7 @@ func api10Put(d *Daemon, r *http.Request) Response {
 
 	// If this is a notification from a cluster node, just run the triggers
 	// for reacting to the values that changed.
-	if r.Header.Get("User-Agent") == "lxd-cluster-notifier" {
+	if isClusterNotification(r) {
 		changed := make(map[string]string)
 		for key, value := range req.Config {
 			changed[key] = value.(string)
diff --git a/lxd/networks.go b/lxd/networks.go
index 80ae27867..f0b120641 100644
--- a/lxd/networks.go
+++ b/lxd/networks.go
@@ -16,6 +16,8 @@ import (
 	"github.com/gorilla/mux"
 	log "github.com/lxc/lxd/shared/log15"
 
+	lxd "github.com/lxc/lxd/client"
+	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/state"
 	"github.com/lxc/lxd/lxd/util"
@@ -244,8 +246,37 @@ func networkDelete(d *Daemon, r *http.Request) Response {
 	if err != nil {
 		return NotFound
 	}
+	if isClusterNotification(r) {
+		n.db = nil // We just want to delete the network from the system
+	} else {
+		// Sanity checks
+		if n.IsUsed() {
+			return BadRequest(fmt.Errorf("The network is currently in use"))
+		}
+	}
+
+	// If we're just handling a notification, we're done.
+	if n.db == nil {
+		return EmptySyncResponse
+	}
+
+	// Notify all other nodes. If any node is down, an error will be returned.
+	notifier, err := cluster.NewNotifier(d.State(), d.endpoints.NetworkCert(), cluster.NotifyAll)
+	if err != nil {
+		return SmartError(err)
+	}
+	err = notifier(func(client lxd.ContainerServer) error {
+		_, _, err := client.GetServer()
+		if err != nil {
+			return err
+		}
+		return client.DeleteNetwork(name)
+	})
+	if err != nil {
+		return SmartError(err)
+	}
 
-	// Attempt to delete the network
+	// Delete the network
 	err = n.Delete()
 	if err != nil {
 		return SmartError(err)
@@ -502,17 +533,15 @@ func (n *network) IsUsed() bool {
 }
 
 func (n *network) Delete() error {
-	// Sanity checks
-	if n.IsUsed() {
-		return fmt.Errorf("The network is currently in use")
-	}
-
 	// Bring the network down
 	if n.IsRunning() {
 		err := n.Stop()
 		if err != nil {
 			return err
 		}
+		if n.db == nil {
+			return nil
+		}
 	}
 
 	// Remove the network from the database
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 2197ea439..7e963ff1f 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -1,12 +1,13 @@
 test_clustering() {
   setup_clustering_bridge
   prefix="lxd$$"
+  bridge="${prefix}"
 
   setup_clustering_netns 1
   LXD_ONE_DIR=$(mktemp -d -p "${TEST_DIR}" XXX)
   chmod +x "${LXD_ONE_DIR}"
-  ns="${prefix}1"
-  LXD_NETNS="${ns}" spawn_lxd "${LXD_ONE_DIR}" false
+  ns1="${prefix}1"
+  LXD_NETNS="${ns1}" spawn_lxd "${LXD_ONE_DIR}" false
   (
     set -e
     # shellcheck disable=SC2034
@@ -20,6 +21,12 @@ config:
 storage_pools:
 - name: data
   driver: dir
+networks:
+- name: $bridge
+  type: bridge
+  config:
+    ipv4.address: none
+    ipv6.address: none
 profiles:
 - name: default
   devices:
@@ -38,8 +45,8 @@ EOF
   setup_clustering_netns 2
   LXD_TWO_DIR=$(mktemp -d -p "${TEST_DIR}" XXX)
   chmod +x "${LXD_TWO_DIR}"
-  ns="${prefix}2"
-  LXD_NETNS="${ns}" spawn_lxd "${LXD_TWO_DIR}" false
+  ns2="${prefix}2"
+  LXD_NETNS="${ns2}" spawn_lxd "${LXD_TWO_DIR}" false
   (
     set -e
     # shellcheck disable=SC2034
@@ -52,6 +59,12 @@ config:
 storage_pools:
 - name: data
   driver: dir
+networks:
+- name: $bridge
+  type: bridge
+  config:
+    ipv4.address: none
+    ipv6.address: none
 profiles:
 - name: default
   devices:
@@ -67,6 +80,14 @@ cluster:
 EOF
   )
 
+  # The preseeded network bridge exists on all nodes.
+  ip netns exec "${ns1}" ip link show "${bridge}" > /dev/null
+  ip netns exec "${ns2}" ip link show "${bridge}" > /dev/null
+
+  # The preseeded network can be deleted from any node, other nodes
+  # are notified.
+  LXD_DIR="${LXD_TWO_DIR}" lxc network delete "${bridge}"
+
   LXD_DIR="${LXD_TWO_DIR}" lxd shutdown
   LXD_DIR="${LXD_ONE_DIR}" lxd shutdown
   sleep 2
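
On the sending side, the notifier marks its requests so receiving nodes can
tell cluster fan-out from user traffic; the header value below is the one
isClusterNotification checks (a sketch with a hypothetical URL, not the
actual notifier code):

    req, err := http.NewRequest("DELETE", "https://10.1.1.102:8443/1.0/networks/br0", nil)
    if err != nil {
        return err
    }
    // Receiving nodes see this header, skip the sanity checks and the
    // re-notification, and delete only their local state.
    req.Header.Set("User-Agent", "lxd-cluster-notifier")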

From 96f56bfed0e6b5fd1d24f8f5445d252ff5129fba Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 30 Oct 2017 10:45:58 +0000
Subject: [PATCH 070/227] Add integration test exercising cluster config value
 changes

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 test/suites/clustering.sh | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 7e963ff1f..5ef8c7b60 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -80,6 +80,11 @@ cluster:
 EOF
   )
 
+  # Configuration keys can be changed on any node.
+  LXD_DIR="${LXD_TWO_DIR}" lxc config set images.auto_update_interval 10
+  LXD_DIR="${LXD_ONE_DIR}" lxc info | grep -q 'images.auto_update_interval: "10"'
+  LXD_DIR="${LXD_TWO_DIR}" lxc info | grep -q 'images.auto_update_interval: "10"'
+
   # The preseeded network bridge exists on all nodes.
   ip netns exec "${ns1}" ip link show "${bridge}" > /dev/null
   ip netns exec "${ns2}" ip link show "${bridge}" > /dev/null

From 4c5950ea5f1cfb190e56ac292c967146d196f9cd Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 31 Oct 2017 10:39:58 +0000
Subject: [PATCH 071/227] Make /internal/raft redirect to a known cluster node

In case the node handling the request is not a database node, redirect
to one of the database nodes it knows about. This makes it possible to
join the cluster even if the specified target node is not a database
node.
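
Since the reply is a plain HTTP 308, a joining node needs no special
handling: Go's http.Client follows permanent redirects automatically (a
sketch with a hypothetical address; TLS configuration omitted):

    // Probe /internal/raft via any cluster member; net/http transparently
    // follows the 308 to one of the known raft nodes.
    resp, err := client.Get("https://10.1.1.103:8443/internal/raft")
    if err != nil {
        return err
    }
    defer resp.Body.Close()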

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/gateway.go      | 31 ++++++++++++++-
 test/includes/clustering.sh | 78 +++++++++++++++++++++++++++++++++++++
 test/main.sh                |  1 +
 test/suites/clustering.sh   | 94 +++++++++++++++------------------------------
 4 files changed, 139 insertions(+), 65 deletions(-)

diff --git a/lxd/cluster/gateway.go b/lxd/cluster/gateway.go
index 64c891b02..342006939 100644
--- a/lxd/cluster/gateway.go
+++ b/lxd/cluster/gateway.go
@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"net"
 	"net/http"
+	"net/url"
 	"strconv"
 	"time"
 
@@ -132,10 +133,38 @@ func (g *Gateway) HandlerFuncs() map[string]http.HandlerFunc {
 		g.server.ServeHTTP(w, r)
 	}
 	raft := func(w http.ResponseWriter, r *http.Request) {
-		if g.raft == nil || g.raft.HandlerFunc() == nil {
+		// If we are not part of the raft cluster, reply with a
+		// redirect to one of the raft nodes that we know about.
+		if g.raft == nil {
+			var address string
+			err := g.db.Transaction(func(tx *db.NodeTx) error {
+				nodes, err := tx.RaftNodes()
+				if err != nil {
+					return err
+				}
+				address = nodes[0].Address
+				return nil
+			})
+			if err != nil {
+				http.Error(w, "500 failed to fetch raft nodes", http.StatusInternalServerError)
+				return
+			}
+			url := &url.URL{
+				Scheme:   "http",
+				Path:     r.URL.Path,
+				RawQuery: r.URL.RawQuery,
+				Host:     address,
+			}
+			http.Redirect(w, r, url.String(), http.StatusPermanentRedirect)
+			return
+		}
+
+		// If this node is not clustered return a 404.
+		if g.raft.HandlerFunc() == nil {
 			http.NotFound(w, r)
 			return
 		}
+
 		g.raft.HandlerFunc()(w, r)
 	}
 
diff --git a/test/includes/clustering.sh b/test/includes/clustering.sh
index 6ffda750c..a87af60df 100644
--- a/test/includes/clustering.sh
+++ b/test/includes/clustering.sh
@@ -70,3 +70,81 @@ teardown_clustering_netns() {
       ip netns delete "${ns}"
   done
 }
+
+spawn_lxd_and_bootstrap_cluster() {
+  set -e
+  ns="${1}"
+  bridge="${2}"
+  LXD_DIR="${3}"
+  LXD_NETNS="${ns}" spawn_lxd "${LXD_DIR}" false
+  (
+    set -e
+
+    cat <<EOF | lxd init --preseed
+config:
+  core.trust_password: sekret
+  core.https_address: 10.1.1.101:8443
+  images.auto_update_interval: 15
+storage_pools:
+- name: data
+  driver: dir
+networks:
+- name: $bridge
+  type: bridge
+  config:
+    ipv4.address: none
+    ipv6.address: none
+profiles:
+- name: default
+  devices:
+    root:
+      path: /
+      pool: data
+      type: disk
+cluster:
+  name: node1
+EOF
+  )
+}
+
+spawn_lxd_and_join_cluster() {
+  set -e
+  ns="${1}"
+  bridge="${2}"
+  cert="${3}"
+  index="${4}"
+  target="${5}"
+  LXD_DIR="${6}"
+
+  LXD_NETNS="${ns}" spawn_lxd "${LXD_DIR}" false
+  (
+    set -e
+
+    cat <<EOF | lxd init --preseed
+config:
+  core.https_address: 10.1.1.10${index}:8443
+  images.auto_update_interval: 15
+storage_pools:
+- name: data
+  driver: dir
+networks:
+- name: $bridge
+  type: bridge
+  config:
+    ipv4.address: none
+    ipv6.address: none
+profiles:
+- name: default
+  devices:
+    root:
+      path: /
+      pool: data
+      type: disk
+cluster:
+  name: node${index}
+  target_address: 10.1.1.10${target}:8443
+  target_password: sekret
+  target_cert: "$cert"
+EOF
+  )
+}
diff --git a/test/main.sh b/test/main.sh
index a45399dd8..96c5b99a4 100755
--- a/test/main.sh
+++ b/test/main.sh
@@ -194,6 +194,7 @@ run_test test_kernel_limits "kernel limits"
 run_test test_macaroon_auth "macaroon authentication"
 run_test test_console "console"
 run_test test_proxy_device "proxy device"
+run_test test_clustering "clustering"
 
 # shellcheck disable=SC2034
 TEST_RESULT=success
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 5ef8c7b60..27812d889 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -7,78 +7,17 @@ test_clustering() {
   LXD_ONE_DIR=$(mktemp -d -p "${TEST_DIR}" XXX)
   chmod +x "${LXD_ONE_DIR}"
   ns1="${prefix}1"
-  LXD_NETNS="${ns1}" spawn_lxd "${LXD_ONE_DIR}" false
-  (
-    set -e
-    # shellcheck disable=SC2034
-    LXD_DIR=${LXD_ONE_DIR}
-
-  cat <<EOF | lxd init --preseed
-config:
-  core.trust_password: sekret
-  core.https_address: 10.1.1.101:8443
-  images.auto_update_interval: 15
-storage_pools:
-- name: data
-  driver: dir
-networks:
-- name: $bridge
-  type: bridge
-  config:
-    ipv4.address: none
-    ipv6.address: none
-profiles:
-- name: default
-  devices:
-    root:
-      path: /
-      pool: data
-      type: disk
-cluster:
-  name: one
-EOF
-  )
+  spawn_lxd_and_bootstrap_cluster "${ns1}" "${bridge}" "${LXD_ONE_DIR}"
 
   # Add a newline at the end of each line. YAML has weird rules...
   cert=$(sed ':a;N;$!ba;s/\n/\n\n/g' "${LXD_ONE_DIR}/server.crt")
 
+  # Spawn a second node.
   setup_clustering_netns 2
   LXD_TWO_DIR=$(mktemp -d -p "${TEST_DIR}" XXX)
   chmod +x "${LXD_TWO_DIR}"
   ns2="${prefix}2"
-  LXD_NETNS="${ns2}" spawn_lxd "${LXD_TWO_DIR}" false
-  (
-    set -e
-    # shellcheck disable=SC2034
-    LXD_DIR=${LXD_TWO_DIR}
-
-  cat <<EOF | lxd init --preseed
-config:
-  core.https_address: 10.1.1.102:8443
-  images.auto_update_interval: 15
-storage_pools:
-- name: data
-  driver: dir
-networks:
-- name: $bridge
-  type: bridge
-  config:
-    ipv4.address: none
-    ipv6.address: none
-profiles:
-- name: default
-  devices:
-    root:
-      path: /
-      pool: data
-      type: disk
-cluster:
-  name: two
-  target_address: 10.1.1.101:8443
-  target_password: sekret
-  target_cert: "$cert"
-EOF
-  )
+  spawn_lxd_and_join_cluster "${ns2}" "${bridge}" "${cert}" 2 1 "${LXD_TWO_DIR}"
 
   # Configuration keys can be changed on any node.
   LXD_DIR="${LXD_TWO_DIR}" lxc config set images.auto_update_interval 10
@@ -89,13 +28,40 @@ EOF
   ip netns exec "${ns1}" ip link show "${bridge}" > /dev/null
   ip netns exec "${ns2}" ip link show "${bridge}" > /dev/null
 
+  # Spawn a third node, using the non-leader node2 as join target.
+  setup_clustering_netns 3
+  LXD_THREE_DIR=$(mktemp -d -p "${TEST_DIR}" XXX)
+  chmod +x "${LXD_THREE_DIR}"
+  ns3="${prefix}3"
+  spawn_lxd_and_join_cluster "${ns3}" "${bridge}" "${cert}" 3 2 "${LXD_THREE_DIR}"
+
+  # Spawn a fourth node; this will be a non-database node.
+  setup_clustering_netns 4
+  LXD_FOUR_DIR=$(mktemp -d -p "${TEST_DIR}" XXX)
+  chmod +x "${LXD_FOUR_DIR}"
+  ns4="${prefix}4"
+  spawn_lxd_and_join_cluster "${ns4}" "${bridge}" "${cert}" 4 1 "${LXD_FOUR_DIR}"
+
+  # Spawn a fifth node, using non-database node4 as join target.
+  setup_clustering_netns 5
+  LXD_FIVE_DIR=$(mktemp -d -p "${TEST_DIR}" XXX)
+  chmod +x "${LXD_FIVE_DIR}"
+  ns5="${prefix}5"
+  spawn_lxd_and_join_cluster "${ns5}" "${bridge}" "${cert}" 5 4 "${LXD_FIVE_DIR}"
+
   # The preseeded network can be deleted from any node, other nodes
   # are notified.
   LXD_DIR="${LXD_TWO_DIR}" lxc network delete "${bridge}"
 
+  LXD_DIR="${LXD_FIVE_DIR}" lxd shutdown
+  LXD_DIR="${LXD_FOUR_DIR}" lxd shutdown
+  LXD_DIR="${LXD_THREE_DIR}" lxd shutdown
   LXD_DIR="${LXD_TWO_DIR}" lxd shutdown
   LXD_DIR="${LXD_ONE_DIR}" lxd shutdown
   sleep 2
+  rm -f "${LXD_FIVE_DIR}/unix.socket"
+  rm -f "${LXD_FOUR_DIR}/unix.socket"
+  rm -f "${LXD_THREE_DIR}/unix.socket"
   rm -f "${LXD_TWO_DIR}/unix.socket"
   rm -f "${LXD_ONE_DIR}/unix.socket"
 }
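
An external illustration of the new behaviour (the address, port and plain
HTTP scheme here are assumptions for the sketch, not part of the patch):
hitting /internal/raft on a non-database node yields a 308 redirect to a
raft node, which Go's default HTTP client follows transparently.

    package main

    import (
    	"fmt"
    	"log"
    	"net/http"
    )

    func main() {
    	// http.Get follows the 308 permanent redirect automatically.
    	resp, err := http.Get("http://10.1.1.104:8443/internal/raft")
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer resp.Body.Close()

    	// resp.Request is the final request, so its host is the raft
    	// node that actually served us after any redirect.
    	fmt.Println("served by:", resp.Request.URL.Host)
    }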

From cca5dc29de0b92eae6f178e3a092ebd9ae11ca29 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 1 Nov 2017 10:47:22 +0000
Subject: [PATCH 072/227] Change bootstrap/join endpoint from /cluster to
 /cluster/nodes

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 client/lxd_cluster.go |  6 +++---
 lxd/api_1.0.go        |  1 +
 lxd/api_cluster.go    | 29 +++++++++++++++++++++--------
 3 files changed, 25 insertions(+), 11 deletions(-)

diff --git a/client/lxd_cluster.go b/client/lxd_cluster.go
index 20a107b8b..e8baae3a5 100644
--- a/client/lxd_cluster.go
+++ b/client/lxd_cluster.go
@@ -27,7 +27,7 @@ func (r *ProtocolLXD) GetCluster(password string) (*api.Cluster, error) {
 // BootstrapCluster requests to bootstrap a new cluster.
 func (r *ProtocolLXD) BootstrapCluster(name string) (*Operation, error) {
 	cluster := api.ClusterPost{Name: name}
-	op, _, err := r.queryOperation("POST", "/cluster", cluster, "")
+	op, _, err := r.queryOperation("POST", "/cluster/nodes", cluster, "")
 	if err != nil {
 		return nil, err
 	}
@@ -45,7 +45,7 @@ func (r *ProtocolLXD) AcceptNode(targetPassword, name, address string, schema, a
 		TargetPassword: targetPassword,
 	}
 	info := &api.ClusterNodeAccepted{}
-	_, err := r.queryStruct("POST", "/cluster", cluster, "", &info)
+	_, err := r.queryStruct("POST", "/cluster/nodes", cluster, "", &info)
 
 	if err != nil {
 		return nil, err
@@ -62,7 +62,7 @@ func (r *ProtocolLXD) JoinCluster(targetAddress, targetPassword, targetCert, nam
 		TargetCert:     targetCert,
 		Name:           name,
 	}
-	op, _, err := r.queryOperation("POST", "/cluster", cluster, "")
+	op, _, err := r.queryOperation("POST", "/cluster/nodes", cluster, "")
 	if err != nil {
 		return nil, err
 	}
diff --git a/lxd/api_1.0.go b/lxd/api_1.0.go
index 1a1cbf561..436361a73 100644
--- a/lxd/api_1.0.go
+++ b/lxd/api_1.0.go
@@ -60,6 +60,7 @@ var api10 = []Command{
 	storagePoolVolumeTypeCmd,
 	serverResourceCmd,
 	clusterCmd,
+	clusterNodesCmd,
 }
 
 func api10Get(d *Daemon, r *http.Request) Response {
diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index 7d9d624c6..0cf11e697 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -15,8 +15,10 @@ import (
 	"github.com/pkg/errors"
 )
 
-var clusterCmd = Command{name: "cluster", untrustedGet: true, get: clusterGet, untrustedPost: true, post: clusterPost}
+var clusterCmd = Command{name: "cluster", untrustedGet: true, get: clusterGet}
 
+// Return information about the cluster, such as the current networks and
+// storage pools, typically needed when a new node is joining.
 func clusterGet(d *Daemon, r *http.Request) Response {
 	// If the client is not trusted, check that it's presenting the trust
 	// password.
@@ -62,7 +64,18 @@ func clusterGet(d *Daemon, r *http.Request) Response {
 	return SyncResponse(true, cluster)
 }
 
-func clusterPost(d *Daemon, r *http.Request) Response {
+var clusterNodesCmd = Command{name: "cluster/nodes", untrustedPost: true, post: clusterNodesPost}
+
+// Depending on the parameters passed and on local state this endpoint will
+// either:
+//
+// - bootstrap a new cluster (if this node is not clustered yet)
+// - request to join an existing cluster
+// - accept the request of a node to join the cluster
+//
+// The client is required to be trusted when bootstrapping a cluster or
+// requesting to join an existing cluster.
+func clusterNodesPost(d *Daemon, r *http.Request) Response {
 	req := api.ClusterPost{}
 
 	// Parse the request
@@ -85,20 +98,20 @@ func clusterPost(d *Daemon, r *http.Request) Response {
 		if !trusted {
 			return Forbidden
 		}
-		return clusterPostBootstrap(d, req)
+		return clusterNodesPostBootstrap(d, req)
 	} else if req.TargetAddress == "" {
-		return clusterPostAccept(d, req)
+		return clusterNodesPostAccept(d, req)
 	} else {
 		// Joining an existing cluster requires the client to be
 		// trusted.
 		if !trusted {
 			return Forbidden
 		}
-		return clusterPostJoin(d, req)
+		return clusterNodesPostJoin(d, req)
 	}
 }
 
-func clusterPostBootstrap(d *Daemon, req api.ClusterPost) Response {
+func clusterNodesPostBootstrap(d *Daemon, req api.ClusterPost) Response {
 	run := func(op *operation) error {
 		return cluster.Bootstrap(d.State(), d.gateway, req.Name)
 	}
@@ -113,7 +126,7 @@ func clusterPostBootstrap(d *Daemon, req api.ClusterPost) Response {
 	return OperationResponse(op)
 }
 
-func clusterPostAccept(d *Daemon, req api.ClusterPost) Response {
+func clusterNodesPostAccept(d *Daemon, req api.ClusterPost) Response {
 	// Accepting a node requires the client to provide the correct
 	// trust password.
 	secret, err := cluster.ConfigGetString(d.cluster, "core.trust_password")
@@ -138,7 +151,7 @@ func clusterPostAccept(d *Daemon, req api.ClusterPost) Response {
 	return SyncResponse(true, accepted)
 }
 
-func clusterPostJoin(d *Daemon, req api.ClusterPost) Response {
+func clusterNodesPostJoin(d *Daemon, req api.ClusterPost) Response {
 	// Make sure basic pre-conditions are met.
 	if len(req.TargetCert) == 0 {
 		return BadRequest(fmt.Errorf("No target cluster node certificate provided"))
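
For reference, a sketch (all field values invented) of how the single POST
/1.0/cluster/nodes endpoint multiplexes the three cases, based on which
api.ClusterPost fields are set:

    // No addresses at all: bootstrap a brand new cluster.
    bootstrap := api.ClusterPost{Name: "node1"}

    // Address set but no TargetAddress: accept a node asking to join.
    accept := api.ClusterPost{
    	Name:           "node2",
    	Address:        "10.1.1.102:8443",
    	TargetPassword: "sekret",
    }

    // TargetAddress set: ask the cluster at that address to let us join.
    join := api.ClusterPost{
    	Name:           "node2",
    	TargetAddress:  "10.1.1.101:8443",
    	TargetPassword: "sekret",
    	TargetCert:     cert, // PEM material of the target node (assumed string)
    }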

From 794de798f4f5dc4ae9df93a1e2810ade2316b327 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 1 Nov 2017 11:53:46 +0000
Subject: [PATCH 073/227] Add db APIs to remove a node

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/node.go                    | 74 +++++++++++++++++++++++++++++++++++++--
 lxd/db/node_test.go               | 57 +++++++++++++++++++++++++++++-
 lxd/db/transaction_export_test.go | 11 ++++++
 3 files changed, 139 insertions(+), 3 deletions(-)
 create mode 100644 lxd/db/transaction_export_test.go

diff --git a/lxd/db/node.go b/lxd/db/node.go
index e029d1b31..743a6bd9e 100644
--- a/lxd/db/node.go
+++ b/lxd/db/node.go
@@ -27,8 +27,8 @@ func (n NodeInfo) IsDown() bool {
 	return n.Heartbeat.Before(time.Now().Add(-20 * time.Second))
 }
 
-// Node returns the node with the given network address.
-func (c *ClusterTx) Node(address string) (NodeInfo, error) {
+// NodeByAddress returns the node with the given network address.
+func (c *ClusterTx) NodeByAddress(address string) (NodeInfo, error) {
 	null := NodeInfo{}
 	nodes, err := c.nodes("address=?", address)
 	if err != nil {
@@ -44,6 +44,23 @@ func (c *ClusterTx) Node(address string) (NodeInfo, error) {
 	}
 }
 
+// NodeByName returns the node with the given name.
+func (c *ClusterTx) NodeByName(name string) (NodeInfo, error) {
+	null := NodeInfo{}
+	nodes, err := c.nodes("name=?", name)
+	if err != nil {
+		return null, err
+	}
+	switch len(nodes) {
+	case 0:
+		return null, NoSuchObjectError
+	case 1:
+		return nodes[0], nil
+	default:
+		return null, fmt.Errorf("more than one node matches")
+	}
+}
+
 // Nodes returns all LXD nodes part of the cluster.
 //
 // If this LXD instance is not clustered, a list with a single node whose
@@ -104,6 +121,22 @@ func (c *ClusterTx) NodeUpdate(id int64, name string, address string) error {
 	return nil
 }
 
+// NodeRemove removes the node with the given id.
+func (c *ClusterTx) NodeRemove(id int64) error {
+	result, err := c.tx.Exec("DELETE FROM nodes WHERE id=?", id)
+	if err != nil {
+		return err
+	}
+	n, err := result.RowsAffected()
+	if err != nil {
+		return err
+	}
+	if n != 1 {
+		return fmt.Errorf("query deleted %d rows instead of 1", n)
+	}
+	return nil
+}
+
 // NodeHeartbeat updates the heartbeat column of the node with the given address.
 func (c *ClusterTx) NodeHeartbeat(address string, heartbeat time.Time) error {
 	stmt := "UPDATE nodes SET heartbeat=? WHERE address=?"
@@ -120,3 +153,40 @@ func (c *ClusterTx) NodeHeartbeat(address string, heartbeat time.Time) error {
 	}
 	return nil
 }
+
+// NodeIsEmpty returns true if the node with the given ID has no containers or
+// images associated with it.
+func (c *ClusterTx) NodeIsEmpty(id int64) (bool, error) {
+	n, err := query.Count(c.tx, "containers", "node_id=?", id)
+	if err != nil {
+		return false, errors.Wrapf(err, "failed to get containers count for node %d", id)
+	}
+	if n > 0 {
+		return false, nil
+	}
+
+	n, err = query.Count(c.tx, "images", "node_id=?", id)
+	if err != nil {
+		return false, errors.Wrapf(err, "failed to get images count for node %d", id)
+	}
+	if n > 0 {
+		return false, nil
+	}
+
+	return true, nil
+}
+
+// NodeClear removes any container or image associated with this node.
+func (c *ClusterTx) NodeClear(id int64) error {
+	_, err := c.tx.Exec("DELETE FROM containers WHERE node_id=?", id)
+	if err != nil {
+		return err
+	}
+
+	_, err = c.tx.Exec("DELETE FROM images WHERE node_id=?", id)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/lxd/db/node_test.go b/lxd/db/node_test.go
index f59a08d1f..22ee430d9 100644
--- a/lxd/db/node_test.go
+++ b/lxd/db/node_test.go
@@ -24,12 +24,38 @@ func TestNodeAdd(t *testing.T) {
 	require.NoError(t, err)
 	require.Len(t, nodes, 2)
 
-	node, err := tx.Node("1.2.3.4:666")
+	node, err := tx.NodeByAddress("1.2.3.4:666")
+	require.NoError(t, err)
 	assert.Equal(t, "buzz", node.Name)
 	assert.Equal(t, "1.2.3.4:666", node.Address)
 	assert.Equal(t, cluster.SchemaVersion, node.Schema)
 	assert.Equal(t, len(version.APIExtensions), node.APIExtensions)
 	assert.False(t, node.IsDown())
+
+	node, err = tx.NodeByName("buzz")
+	require.NoError(t, err)
+	assert.Equal(t, "buzz", node.Name)
+}
+
+// Remove a newly added node.
+func TestNodeRemove(t *testing.T) {
+	tx, cleanup := db.NewTestClusterTx(t)
+	defer cleanup()
+
+	_, err := tx.NodeAdd("buzz", "1.2.3.4:666")
+	require.NoError(t, err)
+
+	id, err := tx.NodeAdd("rusp", "5.6.7.8:666")
+	require.NoError(t, err)
+
+	err = tx.NodeRemove(id)
+	require.NoError(t, err)
+
+	_, err = tx.NodeByName("buzz")
+	assert.NoError(t, err)
+
+	_, err = tx.NodeByName("rusp")
+	assert.Equal(t, db.NoSuchObjectError, err)
 }
 
 // Update the heartbeat of a node.
@@ -50,3 +76,32 @@ func TestNodeHeartbeat(t *testing.T) {
 	node := nodes[1]
 	assert.True(t, node.IsDown())
 }
+
+// A node is considered empty only if it has no containers and no images.
+func TestNodeIsEmpty(t *testing.T) {
+	tx, cleanup := db.NewTestClusterTx(t)
+	defer cleanup()
+
+	id, err := tx.NodeAdd("buzz", "1.2.3.4:666")
+	require.NoError(t, err)
+
+	empty, err := tx.NodeIsEmpty(id)
+	require.NoError(t, err)
+	assert.True(t, empty)
+
+	_, err = tx.Tx().Exec(`
+INSERT INTO containers (id, node_id, name, architecture, type) VALUES (1, ?, 'foo', 1, 1)
+`, id)
+	require.NoError(t, err)
+
+	empty, err = tx.NodeIsEmpty(id)
+	require.NoError(t, err)
+	assert.False(t, empty)
+
+	err = tx.NodeClear(id)
+	require.NoError(t, err)
+
+	empty, err = tx.NodeIsEmpty(id)
+	require.NoError(t, err)
+	assert.True(t, empty)
+}
diff --git a/lxd/db/transaction_export_test.go b/lxd/db/transaction_export_test.go
new file mode 100644
index 000000000..31884382f
--- /dev/null
+++ b/lxd/db/transaction_export_test.go
@@ -0,0 +1,11 @@
+package db
+
+import "database/sql"
+
+// Tx returns the low level database handle to the cluster transaction.
+//
+// FIXME: this is needed by tests that need to interact with entities that have
+// no high-level ClusterTx APIs yet (containers, images, etc.).
+func (c *ClusterTx) Tx() *sql.Tx {
+	return c.tx
+}
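
Taken together, a minimal sketch of how these new ClusterTx helpers compose
(the node name is invented; this mirrors what the Leave logic in the next
patch does):

    err := state.Cluster.Transaction(func(tx *db.ClusterTx) error {
    	node, err := tx.NodeByName("node4")
    	if err != nil {
    		return err
    	}
    	empty, err := tx.NodeIsEmpty(node.ID)
    	if err != nil {
    		return err
    	}
    	if !empty {
    		return fmt.Errorf("node still has containers or images")
    	}
    	return tx.NodeRemove(node.ID)
    })
    if err != nil {
    	return err
    }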

From 174969477307145086a4526880e38b8626b8cfba Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 1 Nov 2017 11:54:05 +0000
Subject: [PATCH 074/227] Add cluster.Leave function implementing logic to
 leave a cluster.

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/membership.go      | 106 ++++++++++++++++++++++++++++++++++++++++-
 lxd/cluster/membership_test.go |  20 ++++++++
 2 files changed, 125 insertions(+), 1 deletion(-)

diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index ac1bd6e5a..7ca99e52b 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -7,6 +7,8 @@ import (
 	"strconv"
 	"time"
 
+	"github.com/CanonicalLtd/raft-http"
+	"github.com/CanonicalLtd/raft-membership"
 	"github.com/hashicorp/raft"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/db/cluster"
@@ -296,7 +298,7 @@ func Join(state *state.State, gateway *Gateway, cert *shared.CertInfo, name stri
 	// the new gRPC network connection. Also, update the storage_pools and
 	// networks tables with our local configuration.
 	err = state.Cluster.Transaction(func(tx *db.ClusterTx) error {
-		node, err := tx.Node(address)
+		node, err := tx.NodeByAddress(address)
 		if err != nil {
 			return errors.Wrap(err, "failed to get ID of joining node")
 		}
@@ -349,6 +351,86 @@ func Join(state *state.State, gateway *Gateway, cert *shared.CertInfo, name stri
 	return nil
 }
 
+// Leave a cluster.
+//
+// If the force flag is true, the node will be removed even if it still has
+// containers and images.
+//
+// Upon success, return the address of the leaving node.
+func Leave(state *state.State, gateway *Gateway, name string, force bool) (string, error) {
+	// Delete the node from the cluster and track its address.
+	var address string
+	err := state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		// Get the node (if it doesn't exist, an error is returned).
+		node, err := tx.NodeByName(name)
+		if err != nil {
+			return err
+		}
+
+		// Check that the node is eligible for leaving.
+		if !force {
+			err = membershipCheckClusterStateForLeave(tx, node.ID)
+		} else {
+			err = tx.NodeClear(node.ID)
+		}
+		if err != nil {
+			return err
+		}
+
+		// Actually remove the node from the cluster database.
+		err = tx.NodeRemove(node.ID)
+		if err != nil {
+			return err
+		}
+		address = node.Address
+		return nil
+	})
+	if err != nil {
+		return "", err
+	}
+
+	// If the node is a database node, leave the raft cluster too.
+	id := ""
+	target := ""
+	err = state.Node.Transaction(func(tx *db.NodeTx) error {
+		nodes, err := tx.RaftNodes()
+		if err != nil {
+			return err
+		}
+		for i, node := range nodes {
+			if node.Address == address {
+				id = strconv.Itoa(int(node.ID))
+				// Save the address of another database node,
+				// we'll use it to leave the raft cluster.
+				target = nodes[(i+1)%len(nodes)].Address
+				break
+			}
+		}
+		return nil
+	})
+	if err != nil {
+		return "", err
+	}
+
+	if target != "" {
+		logger.Info(
+			"Remove node from dqlite raft cluster",
+			log15.Ctx{"id": id, "address": address, "target": target})
+		dial, err := raftDial(gateway.cert)
+		if err != nil {
+			return "", err
+		}
+		err = rafthttp.ChangeMembership(
+			raftmembership.LeaveRequest, raftEndpoint, dial,
+			raft.ServerID(id), address, target, 5*time.Second)
+		if err != nil {
+			return "", err
+		}
+	}
+
+	return address, nil
+}
+
 // Check that node-related preconditions are met for bootstrapping or joining a
 // cluster.
 func membershipCheckNodeStateForBootstrapOrJoin(tx *db.NodeTx, address string) error {
@@ -418,6 +500,28 @@ func membershipCheckClusterStateForAccept(tx *db.ClusterTx, name string, address
 	return nil
 }
 
+// Check that cluster-related preconditions are met for leaving a cluster.
+func membershipCheckClusterStateForLeave(tx *db.ClusterTx, nodeID int64) error {
+	// Check that it has no containers or images.
+	empty, err := tx.NodeIsEmpty(nodeID)
+	if err != nil {
+		return err
+	}
+	if !empty {
+		return fmt.Errorf("node has containers or images")
+	}
+
+	// Check that it's not the last node.
+	nodes, err := tx.Nodes()
+	if err != nil {
+		return err
+	}
+	if len(nodes) == 1 {
+		return fmt.Errorf("node is the only node in the cluster")
+	}
+	return nil
+}
+
 // Check that there is no left-over cluster certificate in the LXD var dir of
 // this node.
 func membershipCheckNoLeftoverClusterCert(dir string) error {
diff --git a/lxd/cluster/membership_test.go b/lxd/cluster/membership_test.go
index fd4489ac0..4cc58d012 100644
--- a/lxd/cluster/membership_test.go
+++ b/lxd/cluster/membership_test.go
@@ -310,6 +310,26 @@ func TestJoin(t *testing.T) {
 	assert.Equal(t, targetAddress, nodes[0].Address)
 	assert.Equal(t, int64(2), nodes[1].ID)
 	assert.Equal(t, address, nodes[1].Address)
+
+	// Leave the cluster.
+	leaving, err := cluster.Leave(state, gateway, "rusp", false /* force */)
+	require.NoError(t, err)
+	assert.Equal(t, address, leaving)
+
+	// The node has gone from the cluster db.
+	err = targetState.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		nodes, err := tx.Nodes()
+		require.NoError(t, err)
+		assert.Len(t, nodes, 1)
+		return nil
+	})
+	require.NoError(t, err)
+
+	// The node has gone from the raft cluster.
+	raft := targetGateway.Raft()
+	future := raft.GetConfiguration()
+	require.NoError(t, future.Error())
+	assert.Len(t, future.Configuration().Servers, 1)
 }
 
 // Helper for setting fixtures for Bootstrap tests.
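
Usage in a nutshell (node name invented): the force flag is the only
difference between the two calls below, trading the emptiness check for an
unconditional cleanup of the node's records.

    // Graceful: fails if "node4" still has containers or images, or if
    // it is the only node left in the cluster.
    address, err := cluster.Leave(state, gateway, "node4", false)

    // Forced: clears the node's containers/images rows, then removes it;
    // in both cases a database node also leaves the raft cluster.
    address, err = cluster.Leave(state, gateway, "node4", true)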

From 10d5557c6d008815951f9583ba512eb871bfcab9 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 1 Nov 2017 14:06:13 +0000
Subject: [PATCH 075/227] Add REST API for leaving a cluster

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 client/interfaces.go    |  1 +
 client/lxd_cluster.go   | 15 +++++++++
 lxd/api_1.0.go          |  1 +
 lxd/api_cluster.go      | 88 ++++++++++++++++++++++++++++++++++++++++++++++++-
 lxd/api_cluster_test.go | 17 ++++++++++
 lxd/cluster/gateway.go  | 25 ++++++++++++++
 6 files changed, 146 insertions(+), 1 deletion(-)

diff --git a/client/interfaces.go b/client/interfaces.go
index c5744512b..e9fc0b6b1 100644
--- a/client/interfaces.go
+++ b/client/interfaces.go
@@ -166,6 +166,7 @@ type ContainerServer interface {
 	BootstrapCluster(name string) (op *Operation, err error)
 	AcceptNode(targetPassword, name, address string, schema, api int) (info *api.ClusterNodeAccepted, err error)
 	JoinCluster(targetAddress, targetPassword, targetCert, name string) (op *Operation, err error)
+	LeaveCluster(name string, force bool) (op *Operation, err error)
 
 	// Internal functions (for internal use)
 	RawQuery(method string, path string, data interface{}, queryETag string) (resp *api.Response, ETag string, err error)
diff --git a/client/lxd_cluster.go b/client/lxd_cluster.go
index e8baae3a5..7afc29fef 100644
--- a/client/lxd_cluster.go
+++ b/client/lxd_cluster.go
@@ -69,3 +69,18 @@ func (r *ProtocolLXD) JoinCluster(targetAddress, targetPassword, targetCert, nam
 
 	return op, nil
 }
+
+// LeaveCluster makes the given node leave the cluster (gracefully or not,
+// depending on the force flag).
+func (r *ProtocolLXD) LeaveCluster(name string, force bool) (*Operation, error) {
+	params := ""
+	if force {
+		params += "?force=1"
+	}
+	url := fmt.Sprintf("/cluster/nodes/%s%s", name, params)
+	op, _, err := r.queryOperation("DELETE", url, nil, "")
+	if err != nil {
+		return nil, err
+	}
+	return op, nil
+}
diff --git a/lxd/api_1.0.go b/lxd/api_1.0.go
index 436361a73..9c986e153 100644
--- a/lxd/api_1.0.go
+++ b/lxd/api_1.0.go
@@ -61,6 +61,7 @@ var api10 = []Command{
 	serverResourceCmd,
 	clusterCmd,
 	clusterNodesCmd,
+	clusterNodeCmd,
 }
 
 func api10Get(d *Daemon, r *http.Request) Response {
diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index 0cf11e697..400f3a50f 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -4,18 +4,23 @@ import (
 	"encoding/json"
 	"fmt"
 	"net/http"
+	"os"
+	"path/filepath"
+	"strconv"
 
+	"github.com/gorilla/mux"
 	lxd "github.com/lxc/lxd/client"
 	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/node"
 	"github.com/lxc/lxd/lxd/util"
+	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
 	"github.com/lxc/lxd/shared/version"
 	"github.com/pkg/errors"
 )
 
-var clusterCmd = Command{name: "cluster", untrustedGet: true, get: clusterGet}
+var clusterCmd = Command{name: "cluster", untrustedGet: true, get: clusterGet, delete: clusterDelete}
 
 // Return information about the cluster, such as the current networks and
 // storage pools, typically needed when a new node is joining.
@@ -64,6 +69,34 @@ func clusterGet(d *Daemon, r *http.Request) Response {
 	return SyncResponse(true, cluster)
 }
 
+// Disable clustering on a node.
+func clusterDelete(d *Daemon, r *http.Request) Response {
+	// Update our TLS configuration using our original certificate.
+	for _, suffix := range []string{"crt", "key", "ca"} {
+		path := filepath.Join(d.os.VarDir, "cluster."+suffix)
+		if !shared.PathExists(path) {
+			continue
+		}
+		err := os.Remove(path)
+		if err != nil {
+			return InternalError(err)
+		}
+	}
+	cert, err := util.LoadCert(d.os.VarDir)
+	if err != nil {
+		return InternalError(errors.Wrap(err, "failed to parse node certificate"))
+	}
+
+	// Reset the cluster database and make it local to this node.
+	d.endpoints.NetworkUpdateCert(cert)
+	err = d.gateway.Reset(cert)
+	if err != nil {
+		return SmartError(err)
+	}
+
+	return EmptySyncResponse
+}
+
 var clusterNodesCmd = Command{name: "cluster/nodes", untrustedPost: true, post: clusterNodesPost}
 
 // Depending on the parameters passed and on local state this endpoint will
@@ -215,3 +248,56 @@ func clusterNodesPostJoin(d *Daemon, req api.ClusterPost) Response {
 
 	return OperationResponse(op)
 }
+
+var clusterNodeCmd = Command{name: "cluster/nodes/{name}", delete: clusterNodeDelete}
+
+// Remove the named node from the cluster, gracefully or forcibly depending
+// on the "force" query parameter.
+func clusterNodeDelete(d *Daemon, r *http.Request) Response {
+	force, err := strconv.Atoi(r.FormValue("force"))
+	if err != nil {
+		force = 0
+	}
+
+	name := mux.Vars(r)["name"]
+	address, err := cluster.Leave(d.State(), d.gateway, name, force == 1)
+	if err != nil {
+		return SmartError(err)
+	}
+
+	var run func(op *operation) error
+
+	if force == 1 {
+		// If the force flag is on, the returned operation is a no-op.
+		run = func(op *operation) error {
+			return nil
+		}
+
+	} else {
+		// Try to gracefully disable clustering on the target node.
+		cert := d.endpoints.NetworkCert()
+		args := &lxd.ConnectionArgs{
+			TLSServerCert: string(cert.PublicKey()),
+			TLSClientCert: string(cert.PublicKey()),
+			TLSClientKey:  string(cert.PrivateKey()),
+		}
+		run = func(op *operation) error {
+			// Ask the leaving node itself to disable clustering
+			// and reset its local cluster state.
+			client, err := lxd.ConnectLXD(fmt.Sprintf("https://%s", address), args)
+			if err != nil {
+				return err
+			}
+			_, _, err = client.RawQuery("DELETE", "/1.0/cluster", nil, "")
+			return err
+		}
+	}
+
+	resources := map[string][]string{}
+	resources["cluster"] = []string{}
+
+	op, err := operationCreate(operationClassTask, resources, nil, run, nil, nil)
+	if err != nil {
+		return InternalError(err)
+	}
+
+	return OperationResponse(op)
+}
diff --git a/lxd/api_cluster_test.go b/lxd/api_cluster_test.go
index 55ba594ac..e4b58bb33 100644
--- a/lxd/api_cluster_test.go
+++ b/lxd/api_cluster_test.go
@@ -136,6 +136,23 @@ func TestCluster_Failover(t *testing.T) {
 	}
 }
 
+// A node can leave a cluster gracefully.
+func TestCluster_Leave(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping cluster leave test in short mode.")
+	}
+	daemons, cleanup := newDaemons(t, 2)
+	defer cleanup()
+
+	f := clusterFixture{t: t}
+	f.FormCluster(daemons)
+
+	client := f.ClientUnix(daemons[1])
+	op, err := client.LeaveCluster("rusp-0", false)
+	require.NoError(t, err)
+	assert.NoError(t, op.Wait())
+}
+
 // Test helper for cluster-related APIs.
 type clusterFixture struct {
 	t       *testing.T
diff --git a/lxd/cluster/gateway.go b/lxd/cluster/gateway.go
index 342006939..ae5aa01b9 100644
--- a/lxd/cluster/gateway.go
+++ b/lxd/cluster/gateway.go
@@ -5,6 +5,8 @@ import (
 	"net"
 	"net/http"
 	"net/url"
+	"os"
+	"path/filepath"
 	"strconv"
 	"time"
 
@@ -238,6 +240,29 @@ func (g *Gateway) Shutdown() error {
 	return g.raft.Shutdown()
 }
 
+// Reset the gateway, shutting it down and starting again from scratch using
+// the given certificate.
+//
+// This is used when disabling clustering on a node.
+func (g *Gateway) Reset(cert *shared.CertInfo) error {
+	err := g.Shutdown()
+	if err != nil {
+		return err
+	}
+	err = os.RemoveAll(filepath.Join(g.db.Dir(), "raft"))
+	if err != nil {
+		return err
+	}
+	err = g.db.Transaction(func(tx *db.NodeTx) error {
+		return tx.RaftNodesReplace(nil)
+	})
+	if err != nil {
+		return err
+	}
+	g.cert = cert
+	return g.init()
+}
+
 // Initialize the gateway, creating a new raft factory and gRPC server (if this
 // node is a database node), and a gRPC dialer.
 func (g *Gateway) init() error {
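
Client-side, the new method behaves like any other background operation
(node name invented):

    op, err := client.LeaveCluster("node4", false)
    if err != nil {
    	return err
    }
    // Without force the operation asks the leaving node to disable
    // clustering on itself; with force it is a no-op.
    return op.Wait()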

From a3c7793c9d25c6bae1a077056f32f10b9b30d059 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 6 Nov 2017 11:54:08 +0000
Subject: [PATCH 076/227] Add lxc cluster remove command

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxc/cluster.go            | 74 +++++++++++++++++++++++++++++++++++++++++++++++
 lxc/main.go               |  1 +
 test/suites/clustering.sh | 19 ++++++++++--
 3 files changed, 91 insertions(+), 3 deletions(-)
 create mode 100644 lxc/cluster.go

diff --git a/lxc/cluster.go b/lxc/cluster.go
new file mode 100644
index 000000000..7abfaa284
--- /dev/null
+++ b/lxc/cluster.go
@@ -0,0 +1,74 @@
+package main
+
+import (
+	"fmt"
+
+	"github.com/lxc/lxd/lxc/config"
+	"github.com/lxc/lxd/shared/gnuflag"
+	"github.com/lxc/lxd/shared/i18n"
+)
+
+type clusterCmd struct {
+	force bool
+}
+
+func (c *clusterCmd) usage() string {
+	return i18n.G(
+		`Usage: lxc cluster <subcommand> [options]
+
+Manage cluster nodes.
+
+*Cluster nodes*
+lxc cluster remove <node> [--force]
+    Remove a node from the cluster.`)
+}
+
+func (c *clusterCmd) flags() {
+	gnuflag.BoolVar(&c.force, "force", false, i18n.G("Force removing a node, even if degraded"))
+}
+
+func (c *clusterCmd) showByDefault() bool {
+	return true
+}
+
+func (c *clusterCmd) run(conf *config.Config, args []string) error {
+	if len(args) < 1 {
+		return errUsage
+	}
+
+	if args[0] == "remove" {
+		return c.doClusterNodeRemove(conf, args)
+	}
+
+	return errUsage
+}
+
+func (c *clusterCmd) doClusterNodeRemove(conf *config.Config, args []string) error {
+	if len(args) < 2 {
+		return errArgs
+	}
+
+	// [[lxc cluster]] remove production:bionic-1
+	remote, name, err := conf.ParseRemote(args[1])
+	if err != nil {
+		return err
+	}
+
+	client, err := conf.GetContainerServer(remote)
+	if err != nil {
+		return err
+	}
+
+	op, err := client.LeaveCluster(name, c.force)
+	if err != nil {
+		return err
+	}
+
+	err = op.Wait()
+	if err != nil {
+		return err
+	}
+
+	fmt.Printf(i18n.G("Node %s removed")+"\n", name)
+	return nil
+}
diff --git a/lxc/main.go b/lxc/main.go
index 54c93df3b..29f0ced53 100644
--- a/lxc/main.go
+++ b/lxc/main.go
@@ -213,6 +213,7 @@ type command interface {
 }
 
 var commands = map[string]command{
+	"cluster":   &clusterCmd{},
 	"config":    &configCmd{},
 	"console":   &consoleCmd{},
 	"copy":      &copyCmd{},
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 27812d889..119529ed9 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -49,11 +49,24 @@ test_clustering() {
   ns5="${prefix}5"
   spawn_lxd_and_join_cluster "${ns5}" "${bridge}" "${cert}" 5 4 "${LXD_FIVE_DIR}"
 
-  # The preseeded network can be deleted from any node, other nodes
-  # are notified.
+  # Shut down a non-database node, and wait a few seconds so it will be
+  # detected as down.
+  LXD_DIR="${LXD_FIVE_DIR}" lxd shutdown
+  sleep 5
+
+  # Trying to delete the preseeded network now fails, because a node is degraded.
+  ! LXD_DIR="${LXD_TWO_DIR}" lxc network delete "${bridge}"
+
+  # Force the removal of the degraded node.
+  LXD_DIR="${LXD_THREE_DIR}" lxc cluster remove node5 --force
+
+  # Now the preseeded network can be deleted, and all nodes are
+  # notified.
   LXD_DIR="${LXD_TWO_DIR}" lxc network delete "${bridge}"
 
-  LXD_DIR="${LXD_FIVE_DIR}" lxd shutdown
+  # Remove a node gracefully.
+  LXD_DIR="${LXD_FOUR_DIR}" lxc cluster remove node4
+
   LXD_DIR="${LXD_FOUR_DIR}" lxd shutdown
   LXD_DIR="${LXD_THREE_DIR}" lxd shutdown
   LXD_DIR="${LXD_TWO_DIR}" lxd shutdown

From 31465f0e22345a68f9b85cdcc37369a4a557ac68 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 6 Nov 2017 12:26:14 +0000
Subject: [PATCH 077/227] Add cluster.List to get a list of current cluster
 nodes

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/membership.go      | 39 +++++++++++++++++++++++++++++++++++++++
 lxd/cluster/membership_test.go | 21 ++++++++++++++-------
 2 files changed, 53 insertions(+), 7 deletions(-)

diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index 7ca99e52b..80631d02c 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -431,6 +431,45 @@ func Leave(state *state.State, gateway *Gateway, name string, force bool) (strin
 	return address, nil
 }
 
+// List the nodes of the cluster.
+//
+// Upon success, return a list of the current nodes and a map telling, for
+// each node ID, whether the node is part of the database cluster or not.
+func List(state *state.State) ([]db.NodeInfo, map[int64]bool, error) {
+	addresses := []string{} // Addresses of database nodes
+	err := state.Node.Transaction(func(tx *db.NodeTx) error {
+		nodes, err := tx.RaftNodes()
+		if err != nil {
+			return errors.Wrap(err, "failed to fetch current raft nodes")
+		}
+		for _, node := range nodes {
+			addresses = append(addresses, node.Address)
+		}
+		return nil
+	})
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var nodes []db.NodeInfo
+	err = state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		nodes, err = tx.Nodes()
+		if err != nil {
+			return err
+		}
+		return nil
+	})
+	if err != nil {
+		return nil, nil, err
+	}
+	flags := make(map[int64]bool) // Whether a node is a database node
+	for _, node := range nodes {
+		flags[node.ID] = shared.StringInSlice(node.Address, addresses)
+	}
+
+	return nodes, flags, nil
+}
+
 // Check that node-related preconditions are met for bootstrapping or joining a
 // cluster.
 func membershipCheckNodeStateForBootstrapOrJoin(tx *db.NodeTx, address string) error {
diff --git a/lxd/cluster/membership_test.go b/lxd/cluster/membership_test.go
index 4cc58d012..bfa5cce8f 100644
--- a/lxd/cluster/membership_test.go
+++ b/lxd/cluster/membership_test.go
@@ -294,22 +294,29 @@ func TestJoin(t *testing.T) {
 	f.NetworkAddress(address)
 
 	// Accept the joining node.
-	nodes, err := cluster.Accept(
+	raftNodes, err := cluster.Accept(
 		targetState, "rusp", address, cluster.SchemaVersion, len(version.APIExtensions))
 	require.NoError(t, err)
 
 	// Actually join the cluster.
-	err = cluster.Join(state, gateway, targetCert, "rusp", nodes)
+	err = cluster.Join(state, gateway, targetCert, "rusp", raftNodes)
 	require.NoError(t, err)
 
 	// The leader now returns an updated list of raft nodes.
-	nodes, err = targetGateway.RaftNodes()
+	raftNodes, err = targetGateway.RaftNodes()
+	require.NoError(t, err)
+	assert.Len(t, raftNodes, 2)
+	assert.Equal(t, int64(1), raftNodes[0].ID)
+	assert.Equal(t, targetAddress, raftNodes[0].Address)
+	assert.Equal(t, int64(2), raftNodes[1].ID)
+	assert.Equal(t, address, raftNodes[1].Address)
+
+	// The List function returns all nodes in the cluster.
+	nodes, flags, err := cluster.List(state)
 	require.NoError(t, err)
 	assert.Len(t, nodes, 2)
-	assert.Equal(t, int64(1), nodes[0].ID)
-	assert.Equal(t, targetAddress, nodes[0].Address)
-	assert.Equal(t, int64(2), nodes[1].ID)
-	assert.Equal(t, address, nodes[1].Address)
+	assert.True(t, flags[1])
+	assert.True(t, flags[2])
 
 	// Leave the cluster.
 	leaving, err := cluster.Leave(state, gateway, "rusp", false /* force */)
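
A short sketch of consuming the new function: the returned map is keyed by
node ID and flags database (raft) membership.

    nodes, flags, err := cluster.List(state)
    if err != nil {
    	return err
    }
    for _, node := range nodes {
    	fmt.Printf("%s %s database=%v\n", node.Name, node.Address, flags[node.ID])
    }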

From 81c6069ebdefb18d61d48042a13955044896921f Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 6 Nov 2017 12:42:29 +0000
Subject: [PATCH 078/227] Add GetNodes API client method

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 client/interfaces.go      |  1 +
 client/lxd_cluster.go     | 13 ++++++++++++
 lxc/cluster.go            | 53 +++++++++++++++++++++++++++++++++++++++++++++++
 lxd/api_cluster.go        | 27 +++++++++++++++++++++++-
 lxd/api_cluster_test.go   |  9 ++++++++
 shared/api/cluster.go     |  8 +++++++
 test/suites/clustering.sh |  6 +++++-
 7 files changed, 115 insertions(+), 2 deletions(-)

diff --git a/client/interfaces.go b/client/interfaces.go
index e9fc0b6b1..4b34b0c85 100644
--- a/client/interfaces.go
+++ b/client/interfaces.go
@@ -167,6 +167,7 @@ type ContainerServer interface {
 	AcceptNode(targetPassword, name, address string, schema, api int) (info *api.ClusterNodeAccepted, err error)
 	JoinCluster(targetAddress, targetPassword, targetCert, name string) (op *Operation, err error)
 	LeaveCluster(name string, force bool) (op *Operation, err error)
+	GetNodes() (nodes []api.Node, err error)
 
 	// Internal functions (for internal use)
 	RawQuery(method string, path string, data interface{}, queryETag string) (resp *api.Response, ETag string, err error)
diff --git a/client/lxd_cluster.go b/client/lxd_cluster.go
index 7afc29fef..5d702459c 100644
--- a/client/lxd_cluster.go
+++ b/client/lxd_cluster.go
@@ -84,3 +84,16 @@ func (r *ProtocolLXD) LeaveCluster(name string, force bool) (*Operation, error)
 	}
 	return op, nil
 }
+
+// GetNodes returns the current nodes in the cluster.
+func (r *ProtocolLXD) GetNodes() ([]api.Node, error) {
+	nodes := []api.Node{}
+	path := "/cluster/nodes"
+	_, err := r.queryStruct("GET", path, nil, "", &nodes)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return nodes, nil
+}
diff --git a/lxc/cluster.go b/lxc/cluster.go
index 7abfaa284..8cac3c4fa 100644
--- a/lxc/cluster.go
+++ b/lxc/cluster.go
@@ -2,10 +2,13 @@ package main
 
 import (
 	"fmt"
+	"os"
+	"sort"
 
 	"github.com/lxc/lxd/lxc/config"
 	"github.com/lxc/lxd/shared/gnuflag"
 	"github.com/lxc/lxd/shared/i18n"
+	"github.com/olekukonko/tablewriter"
 )
 
 type clusterCmd struct {
@@ -36,6 +39,10 @@ func (c *clusterCmd) run(conf *config.Config, args []string) error {
 		return errUsage
 	}
 
+	if args[0] == "list" {
+		return c.doClusterList(conf, args)
+	}
+
 	if args[0] == "remove" {
 		return c.doClusterNodeRemove(conf, args)
 	}
@@ -72,3 +79,49 @@ func (c *clusterCmd) doClusterNodeRemove(conf *config.Config, args []string) err
 	fmt.Printf(i18n.G("Node %s removed")+"\n", name)
 	return nil
 }
+
+func (c *clusterCmd) doClusterList(conf *config.Config, args []string) error {
+	remote := conf.DefaultRemote
+
+	if len(args) > 1 {
+		var err error
+		remote, _, err = conf.ParseRemote(args[1])
+		if err != nil {
+			return err
+		}
+	}
+
+	client, err := conf.GetContainerServer(remote)
+	if err != nil {
+		return err
+	}
+
+	nodes, err := client.GetNodes()
+	if err != nil {
+		return err
+	}
+
+	data := [][]string{}
+	for _, node := range nodes {
+		database := "NO"
+		if node.Database {
+			database = "YES"
+		}
+		data = append(data, []string{node.Name, node.URL, database, node.State})
+	}
+
+	table := tablewriter.NewWriter(os.Stdout)
+	table.SetAutoWrapText(false)
+	table.SetAlignment(tablewriter.ALIGN_LEFT)
+	table.SetRowLine(true)
+	table.SetHeader([]string{
+		i18n.G("NAME"),
+		i18n.G("URL"),
+		i18n.G("DATABASE"),
+		i18n.G("STATE")})
+	sort.Sort(byName(data))
+	table.AppendBulk(data)
+	table.Render()
+
+	return nil
+}
diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index 400f3a50f..e31ac010e 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -97,7 +97,11 @@ func clusterDelete(d *Daemon, r *http.Request) Response {
 	return EmptySyncResponse
 }
 
-var clusterNodesCmd = Command{name: "cluster/nodes", untrustedPost: true, post: clusterNodesPost}
+var clusterNodesCmd = Command{
+	name: "cluster/nodes",
+	post: clusterNodesPost, untrustedPost: true,
+	get: clusterNodesGet,
+}
 
 // Depending on the parameters passed and on local state this endpoint will
 // either:
@@ -249,6 +253,27 @@ func clusterNodesPostJoin(d *Daemon, req api.ClusterPost) Response {
 	return OperationResponse(op)
 }
 
+// Return the current nodes in the cluster, with their database role and
+// online/offline state.
+func clusterNodesGet(d *Daemon, r *http.Request) Response {
+	dbNodes, flags, err := cluster.List(d.State())
+	if err != nil {
+		return SmartError(err)
+	}
+
+	nodes := make([]api.Node, len(dbNodes))
+	for i, dbNode := range dbNodes {
+		nodes[i].Name = dbNode.Name
+		nodes[i].URL = fmt.Sprintf("https://%s", dbNode.Address)
+		nodes[i].Database = flags[dbNode.ID]
+		if dbNode.IsDown() {
+			nodes[i].State = "OFFLINE"
+		} else {
+			nodes[i].State = "ONLINE"
+		}
+	}
+
+	return SyncResponse(true, nodes)
+}
+
 var clusterNodeCmd = Command{name: "cluster/nodes/{name}", delete: clusterNodeDelete}
 
 func clusterNodeDelete(d *Daemon, r *http.Request) Response {
diff --git a/lxd/api_cluster_test.go b/lxd/api_cluster_test.go
index e4b58bb33..1c700ca94 100644
--- a/lxd/api_cluster_test.go
+++ b/lxd/api_cluster_test.go
@@ -80,6 +80,15 @@ func TestCluster_Join(t *testing.T) {
 	for _, daemon := range daemons {
 		assert.NotNil(t, daemon.externalAuth)
 	}
+
+	// The GetNodes client method returns both nodes.
+	nodes, err := client.GetNodes()
+	require.NoError(t, err)
+	assert.Len(t, nodes, 2)
+	assert.Equal(t, "buzz", nodes[0].Name)
+	assert.Equal(t, "rusp", nodes[1].Name)
+	assert.Equal(t, "ONLINE", nodes[0].State)
+	assert.Equal(t, "ONLINE", nodes[1].State)
 }
 
 // If the wrong trust password is given, the join request fails.
diff --git a/shared/api/cluster.go b/shared/api/cluster.go
index 61339f650..b82cfde25 100644
--- a/shared/api/cluster.go
+++ b/shared/api/cluster.go
@@ -36,3 +36,11 @@ type RaftNode struct {
 	ID      int64  `json:"id" yaml:"id"`
 	Address string `json:"address" yaml:"address"`
 }
+
+// Node represents a LXD node in the cluster.
+type Node struct {
+	Name     string `json:"name" yaml:"name"`
+	URL      string `json:"url" yaml:"url"`
+	Database bool   `json:"database" yaml:"database"`
+	State    string `json:"state" yaml:"state"`
+}
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 119529ed9..b73a0e51a 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -49,10 +49,14 @@ test_clustering() {
   ns5="${prefix}5"
   spawn_lxd_and_join_cluster "${ns5}" "${bridge}" "${cert}" 5 4 "${LXD_FIVE_DIR}"
 
+  # List all nodes
+  LXD_DIR="${LXD_THREE_DIR}" lxc cluster list | grep -q "ONLINE"
+
   # Shut down a non-database node, and wait a few seconds so it will be
   # detected as down.
   LXD_DIR="${LXD_FIVE_DIR}" lxd shutdown
-  sleep 5
+  sleep 22
+  LXD_DIR="${LXD_THREE_DIR}" lxc cluster list | grep "node5" | grep -q "OFFLINE"
 
   # Trying to delete the preseeded network now fails, because a node is degraded.
   ! LXD_DIR="${LXD_TWO_DIR}" lxc network delete "${bridge}"
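
On the client side the new method is a plain GET; the fields printed below
are exactly those of the new api.Node struct:

    nodes, err := client.GetNodes()
    if err != nil {
    	return err
    }
    for _, node := range nodes {
    	fmt.Println(node.Name, node.URL, node.Database, node.State)
    }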

From 7d8d780bb643fd5d5d2e3aa71b1757ea9a8bba35 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 7 Nov 2017 09:09:04 +0000
Subject: [PATCH 079/227] Add Gateway.LeaderAddress returning the address of
 the raft leader

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/gateway.go      | 96 +++++++++++++++++++++++++++++++++++++++++++++
 lxd/cluster/gateway_test.go |  8 ++++
 2 files changed, 104 insertions(+)

diff --git a/lxd/cluster/gateway.go b/lxd/cluster/gateway.go
index ae5aa01b9..c0ba5cfff 100644
--- a/lxd/cluster/gateway.go
+++ b/lxd/cluster/gateway.go
@@ -132,6 +132,17 @@ func (g *Gateway) HandlerFuncs() map[string]http.HandlerFunc {
 			return
 		}
 
+		// Handle leader address requests.
+		if r.Method == "GET" {
+			leader, err := g.LeaderAddress()
+			if err != nil {
+				http.Error(w, "500 no elected leader", http.StatusInternalServerError)
+				return
+			}
+			util.WriteJSON(w, map[string]string{"leader": leader}, false)
+			return
+		}
+
 		g.server.ServeHTTP(w, r)
 	}
 	raft := func(w http.ResponseWriter, r *http.Request) {
@@ -263,6 +274,91 @@ func (g *Gateway) Reset(cert *shared.CertInfo) error {
 	return g.init()
 }
 
+// LeaderAddress returns the address of the current raft leader.
+func (g *Gateway) LeaderAddress() (string, error) {
+	// If we aren't clustered, return an error.
+	if g.memoryDial != nil {
+		return "", fmt.Errorf("node is not clustered")
+	}
+
+	ctx, cancel := context.WithTimeout(g.ctx, 5*time.Second)
+	defer cancel()
+
+	// If this is a raft node, return the address of the current leader, or
+	// wait a bit until one is elected.
+	if g.raft != nil {
+		for ctx.Err() == nil {
+			address := g.raft.Raft().Leader()
+			if address != "" {
+				return string(address), nil
+			}
+		}
+		return "", ctx.Err()
+
+	}
+
+	// If this isn't a raft node, contact a raft node and ask for the
+	// address of the current leader.
+	config, err := tlsClientConfig(g.cert)
+	if err != nil {
+		return "", err
+	}
+	addresses := []string{}
+	err = g.db.Transaction(func(tx *db.NodeTx) error {
+		nodes, err := tx.RaftNodes()
+		if err != nil {
+			return err
+		}
+		for _, node := range nodes {
+			addresses = append(addresses, node.Address)
+		}
+		return nil
+	})
+	if err != nil {
+		return "", errors.Wrap(err, "failed to fetch raft nodes addresses")
+	}
+
+	if len(addresses) == 0 {
+		// This should never happen because the raft_nodes table should
+		// never be empty for a clustered node, but check it for good
+		// measure.
+		return "", fmt.Errorf("no raft node known")
+	}
+
+	for _, address := range addresses {
+		url := fmt.Sprintf("https://%s%s", address, grpcEndpoint)
+		request, err := http.NewRequest("GET", url, nil)
+		if err != nil {
+			return "", err
+		}
+		request = request.WithContext(ctx)
+		client := &http.Client{Transport: &http.Transport{TLSClientConfig: config}}
+		response, err := client.Do(request)
+		if err != nil {
+			logger.Debugf("Failed to fetch leader address from %s", address)
+			continue
+		}
+		if response.StatusCode != http.StatusOK {
+			logger.Debugf("Request for leader address from %s failed", address)
+			continue
+		}
+		info := map[string]string{}
+		err = shared.ReadToJSON(response.Body, &info)
+		if err != nil {
+			logger.Debugf("Failed to parse leader address from %s", address)
+			continue
+		}
+		leader := info["leader"]
+		if leader == "" {
+			logger.Debugf("Raft node %s returned no leader address", address)
+			continue
+		}
+		return leader, nil
+	}
+
+	return "", fmt.Errorf("raft cluster is unavailable")
+}
+
 // Initialize the gateway, creating a new raft factory and gRPC server (if this
 // node is a database node), and a gRPC dialer.
 func (g *Gateway) init() error {
diff --git a/lxd/cluster/gateway_test.go b/lxd/cluster/gateway_test.go
index 10536978b..3e1e904fa 100644
--- a/lxd/cluster/gateway_test.go
+++ b/lxd/cluster/gateway_test.go
@@ -41,6 +41,10 @@ func TestGateway_Single(t *testing.T) {
 	conn, err := dialer()
 	assert.NoError(t, err)
 	assert.NotNil(t, conn)
+
+	leader, err := gateway.LeaderAddress()
+	assert.Equal(t, "", leader)
+	assert.EqualError(t, err, "node is not clustered")
 }
 
 // If there's a network address configured, we expose the gRPC endpoint with
@@ -68,6 +72,10 @@ func TestGateway_SingleWithNetworkAddress(t *testing.T) {
 	conn, err := driver.Open("test.db")
 	require.NoError(t, err)
 	require.NoError(t, conn.Close())
+
+	leader, err := gateway.LeaderAddress()
+	require.NoError(t, err)
+	assert.Equal(t, address, leader)
 }
 
 // When networked, the grpc and raft endpoints requires the cluster
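
In short, callers can now resolve the leader from any clustered node; the
error string below is the one asserted by the new test:

    leader, err := gateway.LeaderAddress()
    if err != nil {
    	// e.g. "node is not clustered" on a standalone node.
    	return err
    }
    fmt.Println("current raft leader:", leader)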

From 9d89e8304bce05939536bfa6c22bdf5b7a16e796 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 7 Nov 2017 11:57:11 +0000
Subject: [PATCH 080/227] Redirect to the raft leader all requests to accept a
 new node

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_cluster.go             | 24 ++++++++++++++++++++-
 lxd/cluster/gateway.go         | 49 ++++++++++++++++++++++++++----------------
 lxd/cluster/gateway_test.go    |  7 ++++++
 lxd/cluster/heartbeat_test.go  |  2 +-
 lxd/cluster/membership.go      | 31 ++++++++++++--------------
 lxd/cluster/membership_test.go | 34 ++++++++++-------------------
 lxd/response.go                | 11 +++++++++-
 test/suites/clustering.sh      | 10 +++++++--
 8 files changed, 104 insertions(+), 64 deletions(-)

diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index e31ac010e..f8cd22960 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -4,6 +4,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"net/http"
+	"net/url"
 	"os"
 	"path/filepath"
 	"strconv"
@@ -16,6 +17,7 @@ import (
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
+	"github.com/lxc/lxd/shared/logger"
 	"github.com/lxc/lxd/shared/version"
 	"github.com/pkg/errors"
 )
@@ -164,6 +166,26 @@ func clusterNodesPostBootstrap(d *Daemon, req api.ClusterPost) Response {
 }
 
 func clusterNodesPostAccept(d *Daemon, req api.ClusterPost) Response {
+	// Redirect all requests to the leader, which is the only node
+	// knowing which nodes are part of the raft cluster.
+	address, err := node.HTTPSAddress(d.db)
+	if err != nil {
+		return SmartError(err)
+	}
+	leader, err := d.gateway.LeaderAddress()
+	if err != nil {
+		return InternalError(err)
+	}
+	if address != leader {
+		logger.Debugf("Redirect node accept request to %s", leader)
+		url := &url.URL{
+			Scheme: "https",
+			Path:   "/1.0/cluster/nodes",
+			Host:   leader,
+		}
+		return SyncResponseRedirect(url.String())
+	}
+
 	// Accepting a node requires the client to provide the correct
 	// trust password.
 	secret, err := cluster.ConfigGetString(d.cluster, "core.trust_password")
@@ -173,7 +195,7 @@ func clusterNodesPostAccept(d *Daemon, req api.ClusterPost) Response {
 	if util.PasswordCheck(secret, req.TargetPassword) != nil {
 		return Forbidden
 	}
-	nodes, err := cluster.Accept(d.State(), req.Name, req.Address, req.Schema, req.API)
+	nodes, err := cluster.Accept(d.State(), d.gateway, req.Name, req.Address, req.Schema, req.API)
 	if err != nil {
 		return BadRequest(err)
 	}
diff --git a/lxd/cluster/gateway.go b/lxd/cluster/gateway.go
index c0ba5cfff..ceec8f5d9 100644
--- a/lxd/cluster/gateway.go
+++ b/lxd/cluster/gateway.go
@@ -93,27 +93,11 @@ type Gateway struct {
 // database node part of the dqlite cluster.
 func (g *Gateway) HandlerFuncs() map[string]http.HandlerFunc {
 	grpc := func(w http.ResponseWriter, r *http.Request) {
-		if g.server == nil || g.memoryDial != nil {
-			http.NotFound(w, r)
-			return
-		}
-
 		if !tlsCheckCert(r, g.cert) {
 			http.Error(w, "403 invalid client certificate", http.StatusForbidden)
 			return
 		}
 
-		// Before actually establishing the gRPC SQL connection, our
-		// dialer probes the node to see if it's currently the leader
-		// (otherwise it tries with another node or retry later).
-		if r.Method == "HEAD" {
-			if g.raft.Raft().State() != raft.Leader {
-				http.Error(w, "503 not leader", http.StatusServiceUnavailable)
-				return
-			}
-			return
-		}
-
 		// Handle heartbeats.
 		if r.Method == "PUT" {
 			var nodes []db.RaftNode
@@ -132,6 +116,23 @@ func (g *Gateway) HandlerFuncs() map[string]http.HandlerFunc {
 			return
 		}
 
+		// From here on we require that this node is part of the raft cluster.
+		if g.server == nil || g.memoryDial != nil {
+			http.NotFound(w, r)
+			return
+		}
+
+		// Before actually establishing the gRPC SQL connection, our
+		// dialer probes the node to see if it's currently the leader
+		// (otherwise it tries with another node or retries later).
+		if r.Method == "HEAD" {
+			if g.raft.Raft().State() != raft.Leader {
+				http.Error(w, "503 not leader", http.StatusServiceUnavailable)
+				return
+			}
+			return
+		}
+
 		// Handle leader address requests.
 		if r.Method == "GET" {
 			leader, err := g.LeaderAddress()
@@ -288,10 +289,11 @@ func (g *Gateway) LeaderAddress() (string, error) {
 	// wait a bit until one is elected.
 	if g.raft != nil {
 		for ctx.Err() == nil {
-			address := g.raft.Raft().Leader()
+			address := string(g.raft.Raft().Leader())
 			if address != "" {
-				return string(address), nil
+				return address, nil
 			}
+			time.Sleep(time.Second)
 		}
 		return "", ctx.Err()
 
@@ -388,6 +390,9 @@ func (g *Gateway) init() error {
 
 		g.server = server
 		g.raft = raft
+	} else {
+		g.server = nil
+		g.raft = nil
 	}
 	return nil
 }
@@ -420,7 +425,13 @@ func (g *Gateway) currentRaftNodes() ([]db.RaftNode, error) {
 	for i, server := range servers {
 		address, err := provider.ServerAddr(server.ID)
 		if err != nil {
-			return nil, errors.Wrap(err, "failed to fetch raft server address")
+			if err != db.NoSuchObjectError {
+				return nil, errors.Wrap(err, "failed to fetch raft server address")
+			}
+			// Use the initial address as fallback. This is an edge
+			// case that happens when a new leader is elected and
+			// its raft_nodes table is not fully up-to-date yet.
+			address = server.Address
 		}
 		id, err := strconv.Atoi(string(server.ID))
 		if err != nil {
diff --git a/lxd/cluster/gateway_test.go b/lxd/cluster/gateway_test.go
index 3e1e904fa..48d074bca 100644
--- a/lxd/cluster/gateway_test.go
+++ b/lxd/cluster/gateway_test.go
@@ -1,6 +1,8 @@
 package cluster_test
 
 import (
+	"crypto/tls"
+	"crypto/x509"
 	"fmt"
 	"net/http"
 	"net/http/httptest"
@@ -31,8 +33,13 @@ func TestGateway_Single(t *testing.T) {
 	handlerFuncs := gateway.HandlerFuncs()
 	assert.Len(t, handlerFuncs, 2)
 	for endpoint, f := range handlerFuncs {
+		c, err := x509.ParseCertificate(cert.KeyPair().Certificate[0])
+		require.NoError(t, err)
 		w := httptest.NewRecorder()
 		r := &http.Request{}
+		r.TLS = &tls.ConnectionState{
+			PeerCertificates: []*x509.Certificate{c},
+		}
 		f(w, r)
 		assert.Equal(t, 404, w.Code, endpoint)
 	}
diff --git a/lxd/cluster/heartbeat_test.go b/lxd/cluster/heartbeat_test.go
index 1e78496f3..b40d4292e 100644
--- a/lxd/cluster/heartbeat_test.go
+++ b/lxd/cluster/heartbeat_test.go
@@ -135,7 +135,7 @@ func (f *heartbeatFixture) Grow() *cluster.Gateway {
 	targetState := f.states[target]
 
 	nodes, err := cluster.Accept(
-		targetState, name, address, cluster.SchemaVersion, len(version.APIExtensions))
+		targetState, target, name, address, cluster.SchemaVersion, len(version.APIExtensions))
 
 	err = cluster.Join(state, gateway, target.Cert(), name, nodes)
 	require.NoError(f.t, err)
diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index 80631d02c..d6bebe4db 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -136,7 +136,7 @@ func Bootstrap(state *state.State, gateway *Gateway, name string) error {
 //
 // Return an updated list of raft database nodes (possibly including the newly
 // accepted node).
-func Accept(state *state.State, name, address string, schema, api int) ([]db.RaftNode, error) {
+func Accept(state *state.State, gateway *Gateway, name, address string, schema, api int) ([]db.RaftNode, error) {
 	// Check parameters
 	if name == "" {
 		return nil, fmt.Errorf("node name must not be empty")
@@ -166,25 +166,22 @@ func Accept(state *state.State, name, address string, schema, api int) ([]db.Raf
 
 	// Possibly insert the new node into the raft_nodes table (if we have
 	// less than 3 database nodes).
-	var nodes []db.RaftNode
-	err = state.Node.Transaction(func(tx *db.NodeTx) error {
-		var err error
-		nodes, err = tx.RaftNodes()
-		if err != nil {
-			return errors.Wrap(err, "failed to fetch current raft nodes")
-		}
-		if len(nodes) >= membershipMaxRaftNodes {
+	nodes, err := gateway.currentRaftNodes()
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to get raft nodes from the log")
+	}
+	if len(nodes) < membershipMaxRaftNodes {
+		err = state.Node.Transaction(func(tx *db.NodeTx) error {
+			id, err := tx.RaftNodeAdd(address)
+			if err != nil {
+				return err
+			}
+			nodes = append(nodes, db.RaftNode{ID: id, Address: address})
 			return nil
-		}
-		id, err := tx.RaftNodeAdd(address)
+		})
 		if err != nil {
-			return err
+			return nil, errors.Wrap(err, "failed to insert new node into raft_nodes")
 		}
-		nodes = append(nodes, db.RaftNode{ID: id, Address: address})
-		return nil
-	})
-	if err != nil {
-		return nil, err
 	}
 
 	return nodes, nil
diff --git a/lxd/cluster/membership_test.go b/lxd/cluster/membership_test.go
index bfa5cce8f..b454e7824 100644
--- a/lxd/cluster/membership_test.go
+++ b/lxd/cluster/membership_test.go
@@ -193,9 +193,13 @@ func TestAccept_UnmetPreconditions(t *testing.T) {
 			state, cleanup := state.NewTestState(t)
 			defer cleanup()
 
+			cert := shared.TestingKeyPair()
+			gateway := newGateway(t, state.Node, cert)
+			defer gateway.Shutdown()
+
 			c.setup(&membershipFixtures{t: t, state: state})
 
-			_, err := cluster.Accept(state, c.name, c.address, c.schema, c.api)
+			_, err := cluster.Accept(state, gateway, c.name, c.address, c.schema, c.api)
 			assert.EqualError(t, err, c.error)
 		})
 	}
@@ -206,12 +210,16 @@ func TestAccept(t *testing.T) {
 	state, cleanup := state.NewTestState(t)
 	defer cleanup()
 
+	cert := shared.TestingKeyPair()
+	gateway := newGateway(t, state.Node, cert)
+	defer gateway.Shutdown()
+
 	f := &membershipFixtures{t: t, state: state}
 	f.RaftNode("1.2.3.4:666")
 	f.ClusterNode("1.2.3.4:666")
 
 	nodes, err := cluster.Accept(
-		state, "buzz", "5.6.7.8:666", cluster.SchemaVersion, len(version.APIExtensions))
+		state, gateway, "buzz", "5.6.7.8:666", cluster.SchemaVersion, len(version.APIExtensions))
 	assert.NoError(t, err)
 	assert.Len(t, nodes, 2)
 	assert.Equal(t, int64(1), nodes[0].ID)
@@ -220,26 +228,6 @@ func TestAccept(t *testing.T) {
 	assert.Equal(t, "5.6.7.8:666", nodes[1].Address)
 }
 
-// If the cluster has already reached its maximum number of raft nodes, the
-// joining node is not included in the returned raft nodes list.
-func TestAccept_MaxRaftNodes(t *testing.T) {
-	state, cleanup := state.NewTestState(t)
-	defer cleanup()
-
-	f := &membershipFixtures{t: t, state: state}
-	f.RaftNode("1.1.1.1:666")
-	f.RaftNode("2.2.2.2:666")
-	f.RaftNode("3.3.3.3:666")
-	f.ClusterNode("1.2.3.4:666")
-
-	nodes, err := cluster.Accept(
-		state, "buzz", "4.5.6.7:666", cluster.SchemaVersion, len(version.APIExtensions))
-	assert.NoError(t, err)
-	for _, node := range nodes {
-		assert.NotEqual(t, "4.5.6.7:666", node.Address)
-	}
-}
-
 func TestJoin(t *testing.T) {
 	// Setup a target node running as leader of a cluster.
 	targetCert := shared.TestingKeyPair()
@@ -295,7 +283,7 @@ func TestJoin(t *testing.T) {
 
 	// Accept the joining node.
 	raftNodes, err := cluster.Accept(
-		targetState, "rusp", address, cluster.SchemaVersion, len(version.APIExtensions))
+		targetState, targetGateway, "rusp", address, cluster.SchemaVersion, len(version.APIExtensions))
 	require.NoError(t, err)
 
 	// Actually join the cluster.
diff --git a/lxd/response.go b/lxd/response.go
index 41629738e..73d8540c1 100644
--- a/lxd/response.go
+++ b/lxd/response.go
@@ -30,6 +30,7 @@ type syncResponse struct {
 	etag     interface{}
 	metadata interface{}
 	location string
+	code     int
 	headers  map[string]string
 }
 
@@ -56,7 +57,11 @@ func (r *syncResponse) Render(w http.ResponseWriter) error {
 
 	if r.location != "" {
 		w.Header().Set("Location", r.location)
-		w.WriteHeader(201)
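+		// Use the explicitly configured status code, defaulting to 201 (Created).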
+		code := r.code
+		if code == 0 {
+			code = 201
+		}
+		w.WriteHeader(code)
 	}
 
 	resp := api.ResponseRaw{
@@ -90,6 +95,10 @@ func SyncResponseLocation(success bool, metadata interface{}, location string) R
 	return &syncResponse{success: success, metadata: metadata, location: location}
 }
 
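+// SyncResponseRedirect returns a sync response carrying a Location header and
+// a 308 (Permanent Redirect) status code.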
+func SyncResponseRedirect(address string) Response {
+	return &syncResponse{success: true, location: address, code: http.StatusPermanentRedirect}
+}
+
 func SyncResponseHeaders(success bool, metadata interface{}, headers map[string]string) Response {
 	return &syncResponse{success: success, metadata: metadata, headers: headers}
 }
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index b73a0e51a..80411bad0 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -49,8 +49,14 @@ test_clustering() {
   ns5="${prefix}5"
   spawn_lxd_and_join_cluster "${ns5}" "${bridge}" "${cert}" 5 4 "${LXD_FIVE_DIR}"
 
-  # List all nodes
-  LXD_DIR="${LXD_THREE_DIR}" lxc cluster list | grep -q "ONLINE"
+  # List all nodes, using clients pointing to different nodes and
+  # checking which are database nodes and which are not.
+  LXD_DIR="${LXD_THREE_DIR}" lxc cluster list
+  LXD_DIR="${LXD_THREE_DIR}" lxc cluster list | grep "node1" | grep -q "YES"
+  LXD_DIR="${LXD_FOUR_DIR}" lxc cluster list | grep "node2" | grep -q "YES"
+  LXD_DIR="${LXD_ONE_DIR}" lxc cluster list | grep "node3" | grep -q "YES"
+  LXD_DIR="${LXD_TWO_DIR}" lxc cluster list | grep "node4" | grep -q "NO"
+  LXD_DIR="${LXD_FIVE_DIR}" lxc cluster list | grep "node5" | grep -q "NO"
 
   # Shutdown a non-database node, and wait a few seconds so it will be
   # detected as down.

From a2686042bfc0678ae119494cab743920bddfec55 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 9 Nov 2017 09:07:01 +0000
Subject: [PATCH 081/227] Document new clustering-related public REST APIs

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 doc/api-extensions.md |  13 ++++
 doc/rest-api.md       | 194 ++++++++++++++++++++++++++++++++++++++++++++++++++
 shared/api/cluster.go |   4 +-
 3 files changed, 209 insertions(+), 2 deletions(-)

diff --git a/doc/api-extensions.md b/doc/api-extensions.md
index 1ba3db34d..0c2983f5c 100644
--- a/doc/api-extensions.md
+++ b/doc/api-extensions.md
@@ -395,3 +395,16 @@ getting a stream of events over websocket.
 ## proxy
 This adds a new `proxy` device type to containers, allowing forwarding
 of connections between the host and container.
+
+## clustering
+Clustering API for LXD.
+
+This includes the following new endpoints:
+
+* `GET /1.0/cluster`
+* `DELETE /1.0/cluster` (see [RESTful API](rest-api.md) for details)
+
+* `GET /1.0/cluster/nodes`
+* `POST /1.0/cluster/nodes` (see [RESTful API](rest-api.md) for details)
+
+* `DELETE /1.0/cluster/nodes/<name>` (see [RESTful API](rest-api.md) for details)
diff --git a/doc/rest-api.md b/doc/rest-api.md
index 1c082c12f..89d0a6ed3 100644
--- a/doc/rest-api.md
+++ b/doc/rest-api.md
@@ -2410,3 +2410,197 @@ Input (none at present):
             }
         }
     }
+## `/1.0/storage-pools`
+### GET
+ * Description: list of storage pools
+ * Introduced: with API extension `storage`
+ * Authentication: trusted
+ * Operation: sync
+ * Return: list of storage pools that are currently defined on the host
+
+    [
+        "/1.0/storage-pools/default",
+        "/1.0/storage-pools/pool1"
+        "/1.0/storage-pools/pool2"
+        "/1.0/storage-pools/pool3"
+        "/1.0/storage-pools/pool4"
+    ]
+
+### POST
+ * Description: create a new storage pool
+ * Introduced: with API extension `storage`
+ * Authentication: trusted
+ * Operation: sync
+ * Return: standard return value or standard error
+
+Input:
+
+    {
+        "config": {
+            "size": "10GB"
+        },
+        "driver": "zfs",
+        "name": "pool1"
+    }
+
+## `/1.0/cluster`
+### GET (optional `?password=<trust-password>`)
+ * Description: information about a cluster (such as networks and storage pools)
+ * Introduced: with API extension `clustering`
+ * Authentication: trusted or untrusted
+ * Operation: sync
+ * Return: dict representing a cluster
+
+    {
+        "type": "sync",
+        "status": "Success",
+        "status_code": 200,
+        "operation": "",
+        "error_code": 0,
+        "error": "",
+        "metadata": {
+            "storage_pools": [
+                {
+                    "name": "default",
+                    "description": "",
+                    "config": {
+                        "source": "/var/lib/lxd/storage-pools/default"
+                    },
+                    "driver": "dir",
+                    "used_by": null
+                }
+            ],
+            "networks": [
+                {
+                    "name": "lxdbr0",
+                    "description": "",
+                    "type": "bridge",
+                    "config": {
+                        "ipv4.address": "10.8.219.1/24",
+                        "ipv4.nat": "true",
+                        "ipv6.address": "fd42:f5a2:e47e:2185::1/64",
+                        "ipv6.nat": "true"
+                    },
+                    "used_by": null,
+                    "managed": true
+                }
+            ]
+        }
+    }
+
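+As a minimal sketch, an untrusted client could fetch this information with
+(placeholder address and trust password):
+
+    curl -k "https://10.1.1.101:8443/1.0/cluster?password=sekret"
+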
+### DELETE
+ * Description: disable clustering
+ * Introduced: with API extension `clustering`
+ * Authentication: trusted
+ * Operation: sync
+ * Return: standard return value or standard error
+
+Input (none at present):
+
+    {
+    }
+
+## `/1.0/cluster/nodes`
+### GET
+ * Description: list of LXD nodes in the cluster
+ * Introduced: with API extension `clustering`
+ * Authentication: trusted
+ * Operation: sync
+ * Return: list of dicts with information about each node
+
+	{
+		"type": "sync",
+		"status": "Success",
+		"status_code": 200,
+		"operation": "",
+		"error_code": 0,
+		"error": "",
+		"metadata": [
+			{
+				"name": "lxd1",
+				"url": "https://10.1.1.101:8443",
+				"database": true,
+				"state": "ONLINE"
+			},
+			{
+				"name": "lxd2",
+				"url": "https://10.1.1.102:8443",
+				"database": true,
+				"state": "ONLINE"
+			}
+		]
+	}
+
+### POST
+ * Description: bootstrap, join, or accept a node in the cluster
+ * Introduced: with API extension `clustering`
+ * Authentication: trusted or untrusted
+ * Operation: sync or async
+ * Return: various payloads depending on the input
+
+Input (bootstrap a new cluster):
+
+	{
+		"name": "lxd1"
+	}
+
+Return background operation or standard error.
+
+Input (request to join an existing cluster):
+
+	{
+		"name": "node2",
+		"target_address": "10.1.1.101:8443",
+		"target_cert": "-----BEGIN CERTIFICATE-----MIFf\n-----END CERTIFICATE-----",
+		"target_password": "sekret"
+	}
+
+Return background operation or standard error.
+
+Input (accept a node requesting to join the cluster):
+
+	{
+		"name": "node2",
+		"address": "10.1.1.102:8443",
+		"schema": 2,
+		"api": 63,
+		"target_password": "sekret"
+	}
+
+Return information about raft nodes in the cluster and the private key
+of the cluster certificate:
+
+	{
+		"type": "sync",
+		"status": "Success",
+		"status_code": 200,
+		"operation": "",
+		"error_code": 0,
+		"error": "",
+		"metadata": {
+			"raft_nodes": [
+				{
+					"id": 1,
+					"address": "10.1.1.101:8443"
+				},
+				{
+					"id": 2,
+					"address": "10.1.1.102:8443"
+				}
+			],
+			"private_key": "LS0tLS1CRU"
+		}
+	}
+
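+As a sketch of the accept step (placeholder address and password; in the
+normal flow the joining node issues this request itself while processing a
+join request):
+
+    curl -k -X POST "https://10.1.1.101:8443/1.0/cluster/nodes" -d '{
+        "name": "node2",
+        "address": "10.1.1.102:8443",
+        "schema": 2,
+        "api": 63,
+        "target_password": "sekret"
+    }'
+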
+## `/1.0/cluster/nodes/<name>`
+### DELETE (optional `?force=1`)
+ * Description: remove a node from the cluster
+ * Introduced: with API extension `clustering`
+ * Authentication: trusted
+ * Operation: async
+ * Return: background operation or standard error
+
+Input (none at present):
+
+    {
+    }
diff --git a/shared/api/cluster.go b/shared/api/cluster.go
index b82cfde25..5b653e869 100644
--- a/shared/api/cluster.go
+++ b/shared/api/cluster.go
@@ -2,8 +2,8 @@ package api
 
 // Cluster represents high-level information about a LXD cluster.
 type Cluster struct {
-	StoragePools []StoragePool
-	Networks     []Network
+	StoragePools []StoragePool `json:"storage_pools" yaml:"storage_pools"`
+	Networks     []Network     `json:"networks" yaml:"networks"`
 }
 
 // ClusterPost represents the fields required to bootstrap or join a LXD

From 7fc67f0655cc0b80cb9f2ce0bb9fe16ca23c452e Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 9 Nov 2017 11:46:53 +0000
Subject: [PATCH 082/227] Sanity check that cluster notifications use the
 cluster certificate

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/daemon.go | 24 ++++++++++++++++++------
 1 file changed, 18 insertions(+), 6 deletions(-)

diff --git a/lxd/daemon.go b/lxd/daemon.go
index 798fe462f..5cc19569f 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -121,6 +121,23 @@ type Command struct {
 
 // Check whether the request comes from a trusted client.
 func (d *Daemon) checkTrustedClient(r *http.Request) error {
+	// Check the cluster certificate first, so we return an error if the
+	// notification header is set but the client is not presenting the
+	// cluster certificate (i.e. the request does not appear to come from a
+	// cluster node).
+	cert, _ := x509.ParseCertificate(d.endpoints.NetworkCert().KeyPair().Certificate[0])
+	clusterCerts := []x509.Certificate{*cert}
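+	// Trust any client presenting the cluster certificate, regardless of
+	// how it connected.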
+	if r.TLS != nil {
+		for i := range r.TLS.PeerCertificates {
+			if util.CheckTrustState(*r.TLS.PeerCertificates[i], clusterCerts) {
+				return nil
+			}
+		}
+	}
+	if isClusterNotification(r) {
+		return fmt.Errorf("cluster notification not using cluster certificate")
+	}
+
 	if r.RemoteAddr == "@" {
 		// Unix socket
 		return nil
@@ -139,13 +156,8 @@ func (d *Daemon) checkTrustedClient(r *http.Request) error {
 		return err
 	}
 
-	// Add the server or cluster certificate to the list of trusted ones.
-	cert, _ := x509.ParseCertificate(d.endpoints.NetworkCert().KeyPair().Certificate[0])
-	certs := d.clientCerts
-	certs = append(certs, *cert)
-
 	for i := range r.TLS.PeerCertificates {
-		if util.CheckTrustState(*r.TLS.PeerCertificates[i], certs) {
+		if util.CheckTrustState(*r.TLS.PeerCertificates[i], d.clientCerts) {
 			return nil
 		}
 	}

From 6daf0d4587c0741511e3b15955df928b03e4d4b2 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 13 Nov 2017 13:10:50 +0000
Subject: [PATCH 083/227] Add GET node rest API

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 client/interfaces.go    |  1 +
 client/lxd_cluster.go   | 13 +++++++++++++
 doc/api-extensions.md   |  1 +
 doc/rest-api.md         | 25 +++++++++++++++++++++++++
 lxd/api_cluster.go      | 43 ++++++++++++++++++++++++++++++++++++++++++-
 lxd/api_cluster_test.go |  5 +++++
 6 files changed, 87 insertions(+), 1 deletion(-)

diff --git a/client/interfaces.go b/client/interfaces.go
index 4b34b0c85..e67a2b59e 100644
--- a/client/interfaces.go
+++ b/client/interfaces.go
@@ -168,6 +168,7 @@ type ContainerServer interface {
 	JoinCluster(targetAddress, targetPassword, targetCert, name string) (op *Operation, err error)
 	LeaveCluster(name string, force bool) (op *Operation, err error)
 	GetNodes() (nodes []api.Node, err error)
+	GetNode(name string) (node *api.Node, err error)
 
 	// Internal functions (for internal use)
 	RawQuery(method string, path string, data interface{}, queryETag string) (resp *api.Response, ETag string, err error)
diff --git a/client/lxd_cluster.go b/client/lxd_cluster.go
index 5d702459c..1bb302dc9 100644
--- a/client/lxd_cluster.go
+++ b/client/lxd_cluster.go
@@ -97,3 +97,16 @@ func (r *ProtocolLXD) GetNodes() ([]api.Node, error) {
 
 	return nodes, nil
 }
+
+// GetNode returns information about the given node.
+func (r *ProtocolLXD) GetNode(name string) (*api.Node, error) {
+	node := api.Node{}
+	path := fmt.Sprintf("/cluster/nodes/%s", name)
+	_, err := r.queryStruct("GET", path, nil, "", &node)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return &node, nil
+}
diff --git a/doc/api-extensions.md b/doc/api-extensions.md
index 0c2983f5c..785d12e47 100644
--- a/doc/api-extensions.md
+++ b/doc/api-extensions.md
@@ -407,4 +407,5 @@ This includes the following new endpoints:
 * `GET /1.0/cluster/nodes`
 * `POST /1.0/cluster/nodes` (see [RESTful API](rest-api.md) for details)
 
+* `GET /1.0/cluster/nodes/<name>` (see [RESTful API](rest-api.md) for details)
 * `DELETE /1.0/cluster/nodes/<name>` (see [RESTful API](rest-api.md) for details)
diff --git a/doc/rest-api.md b/doc/rest-api.md
index 89d0a6ed3..3df4dd58b 100644
--- a/doc/rest-api.md
+++ b/doc/rest-api.md
@@ -2593,6 +2593,31 @@ of the cluster certificate:
 	}
 
 ## `/1.0/cluster/nodes/<name>`
+### GET
+ * Description: retrieve the node's information and status
+ * Introduced: with API extension `clustering`
+ * Authentication: trusted
+ * Operation: sync
+ * Return: dict representing the node
+
+    {
+        "type": "sync",
+        "status": "Success",
+        "status_code": 200,
+        "error_code": 0,
+        "error": "",
+        "metadata": {
+            "type": "custom",
+            "used_by": [],
+            "name": "vol1",
+            "config": {
+                "block.filesystem": "ext4",
+                "block.mount_options": "discard",
+                "size": "10737418240"
+            }
+        }
+    }
+
 ### DELETE (optional `?force=1`)
  * Description: remove a node from the cluster
  * Introduced: with API extension `clustering`
diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index f8cd22960..24e3776bd 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -296,7 +296,48 @@ func clusterNodesGet(d *Daemon, r *http.Request) Response {
 	return SyncResponse(true, nodes)
 }
 
-var clusterNodeCmd = Command{name: "cluster/nodes/{name}", delete: clusterNodeDelete}
+var clusterNodeCmd = Command{
+	name:   "cluster/nodes/{name}",
+	get:    clusterNodeGet,
+	delete: clusterNodeDelete,
+}
+
+func clusterNodeGet(d *Daemon, r *http.Request) Response {
+	name := mux.Vars(r)["name"]
+	node := api.Node{Name: name}
+	address := ""
+	err := d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		dbNode, err := tx.NodeByName(name)
+		if err != nil {
+			return err
+		}
+		address = dbNode.Address
+		node.URL = fmt.Sprintf("https://%s", dbNode.Address)
+		if dbNode.IsDown() {
+			node.State = "OFFLINE"
+		} else {
+			node.State = "ONLINE"
+		}
+		return nil
+	})
+	if err != nil {
+		return SmartError(err)
+	}
+
+	// Figure out if this node is currently a database node.
+	err = d.db.Transaction(func(tx *db.NodeTx) error {
+		addresses, err := tx.RaftNodeAddresses()
+		if err != nil {
+			return err
+		}
+		if shared.StringInSlice(address, addresses) {
+			node.Database = true
+		}
+		return nil
+	})
+	if err != nil {
+		return SmartError(err)
+	}
+
+	return SyncResponse(true, node)
+}
 
 func clusterNodeDelete(d *Daemon, r *http.Request) Response {
 	force, err := strconv.Atoi(r.FormValue("force"))
diff --git a/lxd/api_cluster_test.go b/lxd/api_cluster_test.go
index 1c700ca94..8d7c3a944 100644
--- a/lxd/api_cluster_test.go
+++ b/lxd/api_cluster_test.go
@@ -89,6 +89,11 @@ func TestCluster_Join(t *testing.T) {
 	assert.Equal(t, "rusp", nodes[1].Name)
 	assert.Equal(t, "ONLINE", nodes[0].State)
 	assert.Equal(t, "ONLINE", nodes[1].State)
+
+	// The GetNode method returns the requested node.
+	node, err := client.GetNode("buzz")
+	require.NoError(t, err)
+	assert.Equal(t, "buzz", node.Name)
 }
 
 // If the wrong trust password is given, the join request fails.

From 6a82e18162ee3e7fb097f181681f96299fabfac5 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 14 Nov 2017 08:52:53 +0000
Subject: [PATCH 084/227] Rename lxc cluster remove to lxc cluster delete

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxc/cluster.go            | 14 ++++++++------
 test/suites/clustering.sh |  4 ++--
 2 files changed, 10 insertions(+), 8 deletions(-)

diff --git a/lxc/cluster.go b/lxc/cluster.go
index 8cac3c4fa..f913cba20 100644
--- a/lxc/cluster.go
+++ b/lxc/cluster.go
@@ -21,9 +21,11 @@ func (c *clusterCmd) usage() string {
 
 Manage cluster nodes.
 
-*Cluster nodes*
-lxc cluster remove <node> [--force]
-    Remove a node from the cluster.`)
+lxc cluster list [<remote>:]
+    List all nodes in the cluster.
+
+lxc cluster delete [<remote>:]<node> [--force]
+    Delete a node from the cluster.`)
 }
 
 func (c *clusterCmd) flags() {
@@ -43,14 +45,14 @@ func (c *clusterCmd) run(conf *config.Config, args []string) error {
 		return c.doClusterList(conf, args)
 	}
 
-	if args[0] == "remove" {
-		return c.doClusterNodeRemove(conf, args)
+	if args[0] == "delete" {
+		return c.doClusterNodeDelete(conf, args)
 	}
 
 	return nil
 }
 
-func (c *clusterCmd) doClusterNodeRemove(conf *config.Config, args []string) error {
+func (c *clusterCmd) doClusterNodeDelete(conf *config.Config, args []string) error {
 	if len(args) < 2 {
 		return errArgs
 	}
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 80411bad0..5d240394b 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -68,14 +68,14 @@ test_clustering() {
   ! LXD_DIR="${LXD_TWO_DIR}" lxc network delete "${bridge}"
 
   # Force the removal of the degraded node.
-  LXD_DIR="${LXD_THREE_DIR}" lxc cluster remove node5 --force
+  LXD_DIR="${LXD_THREE_DIR}" lxc cluster delete node5 --force
 
   # Now the preseeded network can be deleted, and all nodes are
   # notified.
   LXD_DIR="${LXD_TWO_DIR}" lxc network delete "${bridge}"
 
   # Remove a node gracefully.
-  LXD_DIR="${LXD_FOUR_DIR}" lxc cluster remove node4
+  LXD_DIR="${LXD_FOUR_DIR}" lxc cluster delete node4
 
   LXD_DIR="${LXD_FOUR_DIR}" lxd shutdown
   LXD_DIR="${LXD_THREE_DIR}" lxd shutdown

From c4c73d21ee13cf16c637f35edc3b096555ce1ca1 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 14 Nov 2017 09:03:17 +0000
Subject: [PATCH 085/227] Add lxc cluster show command

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxc/cluster.go            | 40 ++++++++++++++++++++++++++++++++++++++++
 test/suites/clustering.sh |  3 +++
 2 files changed, 43 insertions(+)

diff --git a/lxc/cluster.go b/lxc/cluster.go
index f913cba20..bca261ed6 100644
--- a/lxc/cluster.go
+++ b/lxc/cluster.go
@@ -5,6 +5,8 @@ import (
 	"os"
 	"sort"
 
+	yaml "gopkg.in/yaml.v2"
+
 	"github.com/lxc/lxd/lxc/config"
 	"github.com/lxc/lxd/shared/gnuflag"
 	"github.com/lxc/lxd/shared/i18n"
@@ -24,6 +26,9 @@ Manage cluster nodes.
 lxc cluster list [<remote>:]
     List all nodes in the cluster.
 
+lxc cluster show [<remote>:]<node>
+    Show details of a node.
+
 lxc cluster delete [<remote>:]<node> [--force]
     Delete a node from the cluster.`)
 }
@@ -45,6 +50,10 @@ func (c *clusterCmd) run(conf *config.Config, args []string) error {
 		return c.doClusterList(conf, args)
 	}
 
+	if args[0] == "show" {
+		return c.doClusterNodeShow(conf, args)
+	}
+
 	if args[0] == "delete" {
 		return c.doClusterNodeDelete(conf, args)
 	}
@@ -52,6 +61,37 @@ func (c *clusterCmd) run(conf *config.Config, args []string) error {
 	return nil
 }
 
+func (c *clusterCmd) doClusterNodeShow(conf *config.Config, args []string) error {
+	if len(args) < 2 {
+		return errArgs
+	}
+
+	// [[lxc cluster]] show production:bionic-1
+	remote, name, err := conf.ParseRemote(args[1])
+	if err != nil {
+		return err
+	}
+
+	client, err := conf.GetContainerServer(remote)
+	if err != nil {
+		return err
+	}
+
+	node, err := client.GetNode(name)
+	if err != nil {
+		return err
+	}
+
+	data, err := yaml.Marshal(&node)
+	if err != nil {
+		return err
+	}
+
+	fmt.Printf("%s", data)
+
+	return nil
+}
+
 func (c *clusterCmd) doClusterNodeDelete(conf *config.Config, args []string) error {
 	if len(args) < 2 {
 		return errArgs
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 5d240394b..d0ecc5f67 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -58,6 +58,9 @@ test_clustering() {
   LXD_DIR="${LXD_TWO_DIR}" lxc cluster list | grep "node4" | grep -q "NO"
   LXD_DIR="${LXD_FIVE_DIR}" lxc cluster list | grep "node5" | grep -q "NO"
 
+  # Show a single node
+  LXD_DIR="${LXD_TWO_DIR}" lxc cluster show node5 | grep -q "node5"
+
   # Shutdown a non-database node, and wait a few seconds so it will be
   # detected as down.
   LXD_DIR="${LXD_FIVE_DIR}" lxd shutdown

From 3fd95393865ab955f3efb242626d98a0d3143424 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 14 Nov 2017 10:22:59 +0000
Subject: [PATCH 086/227] Notify new client certificates

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/certificates.go       | 29 ++++++++++++++++++++++++++---
 test/suites/clustering.sh |  5 +++++
 2 files changed, 31 insertions(+), 3 deletions(-)

diff --git a/lxd/certificates.go b/lxd/certificates.go
index e51e6a88f..c2420a381 100644
--- a/lxd/certificates.go
+++ b/lxd/certificates.go
@@ -11,6 +11,7 @@ import (
 
 	"github.com/gorilla/mux"
 
+	lxd "github.com/lxc/lxd/client"
 	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/util"
@@ -148,9 +149,31 @@ func certificatesPost(d *Daemon, r *http.Request) Response {
 		}
 	}
 
-	err = saveCert(d.cluster, name, cert)
-	if err != nil {
-		return SmartError(err)
+	if !isClusterNotification(r) {
+		// Store the certificate in the cluster database.
+		err = saveCert(d.cluster, name, cert)
+		if err != nil {
+			return SmartError(err)
+		}
+
+		// Notify other nodes about the new certificate.
+		notifier, err := cluster.NewNotifier(
+			d.State(), d.endpoints.NetworkCert(), cluster.NotifyAlive)
+		if err != nil {
+			return SmartError(err)
+		}
+		req := api.CertificatesPost{
+			Certificate: base64.StdEncoding.EncodeToString(cert.Raw),
+		}
+		req.Name = name
+		req.Type = "client"
+
+		err = notifier(func(client lxd.ContainerServer) error {
+			return client.CreateCertificate(req)
+		})
+		if err != nil {
+			return SmartError(err)
+		}
 	}
 
 	d.clientCerts = append(d.clientCerts, *cert)
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index d0ecc5f67..108a80e16 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -61,6 +61,11 @@ test_clustering() {
   # Show a single node
   LXD_DIR="${LXD_TWO_DIR}" lxc cluster show node5 | grep -q "node5"
 
+  # Client certificates are shared across all nodes.
+  LXD_DIR="${LXD_ONE_DIR}" lxc remote add cluster 10.1.1.101:8443 --accept-certificate --password=sekret
+  LXD_DIR="${LXD_ONE_DIR}" lxc remote set-url cluster https://10.1.1.102:8443
+  lxc network list cluster: | grep -q "${bridge}"
+
   # Shutdown a non-database node, and wait a few seconds so it will be
   # detected as down.
   LXD_DIR="${LXD_FIVE_DIR}" lxd shutdown

From 4142a2b173094df2fa3af37c1406ece8b295067c Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 14 Nov 2017 20:52:45 +0000
Subject: [PATCH 087/227] Add operations table

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/membership.go     |  2 +-
 lxd/db/cluster/schema.go      |  9 ++++-
 lxd/db/cluster/update.go      | 16 +++++++-
 lxd/db/cluster/update_test.go | 25 +++++++++++++
 lxd/db/containers.go          |  2 +-
 lxd/db/db.go                  | 22 ++++++-----
 lxd/db/images.go              |  2 +-
 lxd/db/networks.go            | 12 +++---
 lxd/db/node.go                |  4 +-
 lxd/db/operations.go          | 87 +++++++++++++++++++++++++++++++++++++++++++
 lxd/db/operations_test.go     | 33 ++++++++++++++++
 lxd/db/storage_pools.go       | 14 +++----
 lxd/db/storage_volumes.go     |  2 +-
 lxd/db/testing.go             |  2 +-
 lxd/db/transaction.go         |  3 +-
 15 files changed, 202 insertions(+), 33 deletions(-)
 create mode 100644 lxd/db/operations.go
 create mode 100644 lxd/db/operations_test.go

diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index d6bebe4db..00edaf924 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -299,7 +299,7 @@ func Join(state *state.State, gateway *Gateway, cert *shared.CertInfo, name stri
 		if err != nil {
 			return errors.Wrap(err, "failed to get ID of joining node")
 		}
-		state.Cluster.ID(node.ID)
+		state.Cluster.NodeID(node.ID)
 
 		// Storage pools.
 		ids, err := tx.StoragePoolIDs()
diff --git a/lxd/db/cluster/schema.go b/lxd/db/cluster/schema.go
index cfd5ddbd7..a38e7895c 100644
--- a/lxd/db/cluster/schema.go
+++ b/lxd/db/cluster/schema.go
@@ -151,6 +151,13 @@ CREATE TABLE nodes (
     UNIQUE (name),
     UNIQUE (address)
 );
+CREATE TABLE operations (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    uuid TEXT NOT NULL,
+    node_id TEXT NOT NULL,
+    UNIQUE (uuid),
+    FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
+);
 CREATE TABLE profiles (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
     name TEXT NOT NULL,
@@ -218,5 +225,5 @@ CREATE TABLE storage_volumes_config (
     FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
 );
 
-INSERT INTO schema (version, updated_at) VALUES (2, strftime("%s"))
+INSERT INTO schema (version, updated_at) VALUES (3, strftime("%s"))
 `
diff --git a/lxd/db/cluster/update.go b/lxd/db/cluster/update.go
index 61c52750c..0fc4f7df0 100644
--- a/lxd/db/cluster/update.go
+++ b/lxd/db/cluster/update.go
@@ -25,10 +25,24 @@ var SchemaVersion = len(updates)
 var updates = map[int]schema.Update{
 	1: updateFromV0,
 	2: updateFromV1,
+	3: updateFromV2,
+}
+
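+// updateFromV2 adds the operations table, which tracks the node that each
+// running operation belongs to.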
+func updateFromV2(tx *sql.Tx) error {
+	stmt := `
+CREATE TABLE operations (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    uuid TEXT NOT NULL,
+    node_id TEXT NOT NULL,
+    UNIQUE (uuid),
+    FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
+);
+`
+	_, err := tx.Exec(stmt)
+	return err
 }
 
 func updateFromV1(tx *sql.Tx) error {
-	// config table
 	stmt := `
 CREATE TABLE certificates (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
diff --git a/lxd/db/cluster/update_test.go b/lxd/db/cluster/update_test.go
index 463d5d8f0..8f43a1df1 100644
--- a/lxd/db/cluster/update_test.go
+++ b/lxd/db/cluster/update_test.go
@@ -180,3 +180,28 @@ func testConfigTable(t *testing.T, table string, setup func(db *sql.DB)) {
 	require.NoError(t, err)
 	assert.Equal(t, int64(0), n) // The row was already deleted by the previous query
 }
+
+func TestUpdateFromV2(t *testing.T) {
+	schema := cluster.Schema()
+	db, err := schema.ExerciseUpdate(3, nil)
+	require.NoError(t, err)
+
+	_, err = db.Exec("INSERT INTO nodes VALUES (1, 'one', '', '1.1.1.1', 666, 999, ?)", time.Now())
+	require.NoError(t, err)
+
+	_, err = db.Exec("INSERT INTO operations VALUES (1, 'abcd', 1)")
+	require.NoError(t, err)
+
+	// Unique constraint on uuid
+	_, err = db.Exec("INSERT INTO operations VALUES (2, 'abcd', 1)")
+	require.Error(t, err)
+
+	// Cascade delete on node_id
+	_, err = db.Exec("DELETE FROM nodes")
+	require.NoError(t, err)
+	result, err := db.Exec("DELETE FROM operations")
+	require.NoError(t, err)
+	n, err := result.RowsAffected()
+	require.NoError(t, err)
+	assert.Equal(t, int64(0), n)
+}
diff --git a/lxd/db/containers.go b/lxd/db/containers.go
index 337a432a1..1cd19a33f 100644
--- a/lxd/db/containers.go
+++ b/lxd/db/containers.go
@@ -166,7 +166,7 @@ func (c *Cluster) ContainerCreate(args ContainerArgs) (int, error) {
 		return 0, err
 	}
 	defer stmt.Close()
-	result, err := stmt.Exec(c.id, args.Name, args.Architecture, args.Ctype, ephemInt, args.CreationDate.Unix(), args.LastUsedDate.Unix(), statefulInt)
+	result, err := stmt.Exec(c.nodeID, args.Name, args.Architecture, args.Ctype, ephemInt, args.CreationDate.Unix(), args.LastUsedDate.Unix(), statefulInt)
 	if err != nil {
 		tx.Rollback()
 		return 0, err
diff --git a/lxd/db/db.go b/lxd/db/db.go
index 97d1f9173..6bcde122e 100644
--- a/lxd/db/db.go
+++ b/lxd/db/db.go
@@ -140,8 +140,8 @@ func (n *Node) Begin() (*sql.Tx, error) {
 
 // Cluster mediates access to LXD's data stored in the cluster dqlite database.
 type Cluster struct {
-	db *sql.DB // Handle to the cluster dqlite database, gated behind gRPC SQL.
-	id int64   // Node ID of this LXD instance.
+	db     *sql.DB // Handle to the cluster dqlite database, gated behind gRPC SQL.
+	nodeID int64   // Node ID of this LXD instance.
 }
 
 // OpenCluster creates a new Cluster object for interacting with the dqlite
@@ -180,12 +180,12 @@ func OpenCluster(name string, dialer grpcsql.Dialer, address string) (*Cluster,
 		}
 		if len(nodes) == 1 && nodes[0].Address == "0.0.0.0" {
 			// We're not clustered
-			cluster.ID(1)
+			cluster.NodeID(1)
 			return nil
 		}
 		for _, node := range nodes {
 			if node.Address == address {
-				cluster.id = node.ID
+				cluster.nodeID = node.ID
 				return nil
 			}
 		}
@@ -210,7 +210,9 @@ func ForLocalInspection(db *sql.DB) *Cluster {
 // returns no error, all database changes are committed to the cluster database
 // database, otherwise they are rolled back.
 func (c *Cluster) Transaction(f func(*ClusterTx) error) error {
-	clusterTx := &ClusterTx{}
+	clusterTx := &ClusterTx{
+		nodeID: c.nodeID,
+	}
 
 	// FIXME: the retry loop should be configurable.
 	var err error
@@ -229,12 +231,12 @@ func (c *Cluster) Transaction(f func(*ClusterTx) error) error {
 	return err
 }
 
-// ID sets the the node ID associated with this cluster instance. It's used for
+// NodeID sets the node ID associated with this cluster instance. It's used for
 // backward-compatibility of all db-related APIs that were written before
-// clustering and don't accept a node ID, so in those cases we automatically
-// use this value as implict node ID.
-func (c *Cluster) ID(id int64) {
-	c.id = id
+// clustering and don't accept a node ID, so in those cases we automatically
+// use this value as the implicit node ID.
+func (c *Cluster) NodeID(id int64) {
+	c.nodeID = id
 }
 
 // Close the database facade.
diff --git a/lxd/db/images.go b/lxd/db/images.go
index e71ef07ac..d183d253d 100644
--- a/lxd/db/images.go
+++ b/lxd/db/images.go
@@ -523,7 +523,7 @@ func (c *Cluster) ImageInsert(fp string, fname string, sz int64, public bool, au
 
 	}
 
-	_, err = tx.Exec("INSERT INTO images_nodes(image_id, node_id) VALUES(?, ?)", id, c.id)
+	_, err = tx.Exec("INSERT INTO images_nodes(image_id, node_id) VALUES(?, ?)", id, c.nodeID)
 	if err != nil {
 		tx.Rollback()
 		return err
diff --git a/lxd/db/networks.go b/lxd/db/networks.go
index ff98871cb..7627a63c5 100644
--- a/lxd/db/networks.go
+++ b/lxd/db/networks.go
@@ -124,7 +124,7 @@ func (c *Cluster) NetworkGetInterface(devName string) (int64, *api.Network, erro
 	value := ""
 
 	q := "SELECT networks.id, networks.name, networks_config.value FROM networks LEFT JOIN networks_config ON networks.id=networks_config.network_id WHERE networks_config.key=\"bridge.external_interfaces\" AND networks_config.node_id=?"
-	arg1 := []interface{}{c.id}
+	arg1 := []interface{}{c.nodeID}
 	arg2 := []interface{}{id, name, value}
 	result, err := queryScan(c.db, q, arg1, arg2)
 	if err != nil {
@@ -169,7 +169,7 @@ func (c *Cluster) NetworkConfigGet(id int64) (map[string]string, error) {
         FROM networks_config
 		WHERE network_id=?
                 AND node_id=?`
-	inargs := []interface{}{id, c.id}
+	inargs := []interface{}{id, c.nodeID}
 	outfmt := []interface{}{key, value}
 	results, err := queryScan(c.db, query, inargs, outfmt)
 	if err != nil {
@@ -225,14 +225,14 @@ func (c *Cluster) NetworkCreate(name, description string, config map[string]stri
 
 	// Insert a node-specific entry pointing to ourselves.
 	columns := []string{"network_id", "node_id"}
-	values := []interface{}{id, c.id}
+	values := []interface{}{id, c.nodeID}
 	_, err = query.UpsertObject(tx, "networks_nodes", columns, values)
 	if err != nil {
 		tx.Rollback()
 		return -1, err
 	}
 
-	err = networkConfigAdd(tx, id, c.id, config)
+	err = networkConfigAdd(tx, id, c.nodeID, config)
 	if err != nil {
 		tx.Rollback()
 		return -1, err
@@ -263,13 +263,13 @@ func (c *Cluster) NetworkUpdate(name, description string, config map[string]stri
 		return err
 	}
 
-	err = NetworkConfigClear(tx, id, c.id)
+	err = NetworkConfigClear(tx, id, c.nodeID)
 	if err != nil {
 		tx.Rollback()
 		return err
 	}
 
-	err = networkConfigAdd(tx, id, c.id, config)
+	err = networkConfigAdd(tx, id, c.nodeID, config)
 	if err != nil {
 		tx.Rollback()
 		return err
diff --git a/lxd/db/node.go b/lxd/db/node.go
index 743a6bd9e..4b42ec1cc 100644
--- a/lxd/db/node.go
+++ b/lxd/db/node.go
@@ -165,7 +165,7 @@ func (c *ClusterTx) NodeIsEmpty(id int64) (bool, error) {
 		return false, nil
 	}
 
-	n, err = query.Count(c.tx, "images", "node_id=?", id)
+	n, err = query.Count(c.tx, "images_nodes", "node_id=?", id)
 	if err != nil {
 		return false, errors.Wrapf(err, "failed to get images count for node %d", id)
 	}
@@ -183,7 +183,7 @@ func (c *ClusterTx) NodeClear(id int64) error {
 		return err
 	}
 
-	_, err = c.tx.Exec("DELETE FROM images WHERE node_id=?", id)
+	_, err = c.tx.Exec("DELETE FROM images_nodes WHERE node_id=?", id)
 	if err != nil {
 		return err
 	}
diff --git a/lxd/db/operations.go b/lxd/db/operations.go
new file mode 100644
index 000000000..bc00e9f47
--- /dev/null
+++ b/lxd/db/operations.go
@@ -0,0 +1,87 @@
+package db
+
+import (
+	"fmt"
+
+	"github.com/lxc/lxd/lxd/db/query"
+	"github.com/pkg/errors"
+)
+
+// Operation holds information about a single LXD operation running on a node
+// in the cluster.
+type Operation struct {
+	ID          int64  // Stable database identifier
+	UUID        string // User-visible identifier
+	NodeAddress string // Address of the node the operation is running on
+}
+
+// OperationsUUIDs returns the UUIDs of all operations associated with this
+// node.
+func (c *ClusterTx) OperationsUUIDs() ([]string, error) {
+	stmt := "SELECT uuid FROM operations WHERE node_id=?"
+	return query.SelectStrings(c.tx, stmt, c.nodeID)
+}
+
+// OperationByUUID returns the operation with the given UUID.
+func (c *ClusterTx) OperationByUUID(uuid string) (Operation, error) {
+	null := Operation{}
+	operations, err := c.operations("uuid=?", uuid)
+	if err != nil {
+		return null, err
+	}
+	switch len(operations) {
+	case 0:
+		return null, NoSuchObjectError
+	case 1:
+		return operations[0], nil
+	default:
+		return null, fmt.Errorf("more than one operation matches")
+	}
+}
+
+// OperationAdd adds a new operation to the table.
+func (c *ClusterTx) OperationAdd(uuid string) (int64, error) {
+	columns := []string{"uuid", "node_id"}
+	values := []interface{}{uuid, c.nodeID}
+	return query.UpsertObject(c.tx, "operations", columns, values)
+}
+
+// OperationRemove removes the operation with the given UUID.
+func (c *ClusterTx) OperationRemove(uuid string) error {
+	result, err := c.tx.Exec("DELETE FROM operations WHERE uuid=?", uuid)
+	if err != nil {
+		return err
+	}
+	n, err := result.RowsAffected()
+	if err != nil {
+		return err
+	}
+	if n != 1 {
+		return fmt.Errorf("query deleted %d rows instead of 1", n)
+	}
+	return nil
+}
+
+// operations returns all operations in the cluster, filtered by the given clause.
+func (c *ClusterTx) operations(where string, args ...interface{}) ([]Operation, error) {
+	operations := []Operation{}
+	dest := func(i int) []interface{} {
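+		// Grow the result slice by one row and return pointers to its
+		// fields for the scanner to fill in.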
+		operations = append(operations, Operation{})
+		return []interface{}{
+			&operations[i].ID,
+			&operations[i].UUID,
+			&operations[i].NodeAddress,
+		}
+	}
+	stmt := `
+SELECT operations.id, uuid, nodes.address FROM operations JOIN nodes ON nodes.id = node_id `
+	if where != "" {
+		stmt += fmt.Sprintf("WHERE %s ", where)
+	}
+	stmt += "ORDER BY operations.id"
+	err := query.SelectObjects(c.tx, dest, stmt, args...)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to fetch operations")
+	}
+	return operations, nil
+}
diff --git a/lxd/db/operations_test.go b/lxd/db/operations_test.go
new file mode 100644
index 000000000..896304bea
--- /dev/null
+++ b/lxd/db/operations_test.go
@@ -0,0 +1,33 @@
+package db_test
+
+import (
+	"testing"
+
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// Add, get and remove an operation.
+func TestOperation(t *testing.T) {
+	tx, cleanup := db.NewTestClusterTx(t)
+	defer cleanup()
+
+	id, err := tx.OperationAdd("abcd")
+	require.NoError(t, err)
+	assert.Equal(t, int64(1), id)
+
+	operation, err := tx.OperationByUUID("abcd")
+	require.NoError(t, err)
+	assert.Equal(t, id, operation.ID)
+
+	uuids, err := tx.OperationsUUIDs()
+	require.NoError(t, err)
+	assert.Equal(t, []string{"abcd"}, uuids)
+
+	err = tx.OperationRemove("abcd")
+	require.NoError(t, err)
+
+	_, err = tx.OperationByUUID("abcd")
+	assert.Equal(t, db.NoSuchObjectError, err)
+}
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index ad2fad60e..a32263e24 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -164,7 +164,7 @@ func (c *Cluster) StoragePoolGet(poolName string) (int64, *api.StoragePool, erro
 func (c *Cluster) StoragePoolConfigGet(poolID int64) (map[string]string, error) {
 	var key, value string
 	query := "SELECT key, value FROM storage_pools_config WHERE storage_pool_id=? AND (node_id=? OR node_id IS NULL)"
-	inargs := []interface{}{poolID, c.id}
+	inargs := []interface{}{poolID, c.nodeID}
 	outargs := []interface{}{key, value}
 
 	results, err := queryScan(c.db, query, inargs, outargs)
@@ -203,7 +203,7 @@ func (c *Cluster) StoragePoolCreate(poolName string, poolDescription string, poo
 		return -1, err
 	}
 
-	err = storagePoolConfigAdd(tx, id, c.id, poolConfig)
+	err = storagePoolConfigAdd(tx, id, c.nodeID, poolConfig)
 	if err != nil {
 		tx.Rollback()
 		return -1, err
@@ -264,13 +264,13 @@ func (c *Cluster) StoragePoolUpdate(poolName, description string, poolConfig map
 		return err
 	}
 
-	err = StoragePoolConfigClear(tx, poolID, c.id)
+	err = StoragePoolConfigClear(tx, poolID, c.nodeID)
 	if err != nil {
 		tx.Rollback()
 		return err
 	}
 
-	err = storagePoolConfigAdd(tx, poolID, c.id, poolConfig)
+	err = storagePoolConfigAdd(tx, poolID, c.nodeID, poolConfig)
 	if err != nil {
 		tx.Rollback()
 		return err
@@ -420,13 +420,13 @@ func (c *Cluster) StoragePoolVolumeUpdate(volumeName string, volumeType int, poo
 		return err
 	}
 
-	err = StorageVolumeConfigClear(tx, volumeID, c.id)
+	err = StorageVolumeConfigClear(tx, volumeID, c.nodeID)
 	if err != nil {
 		tx.Rollback()
 		return err
 	}
 
-	err = StorageVolumeConfigAdd(tx, volumeID, c.id, volumeConfig)
+	err = StorageVolumeConfigAdd(tx, volumeID, c.nodeID, volumeConfig)
 	if err != nil {
 		tx.Rollback()
 		return err
@@ -497,7 +497,7 @@ func (c *Cluster) StoragePoolVolumeCreate(volumeName, volumeDescription string,
 		return -1, err
 	}
 
-	err = StorageVolumeConfigAdd(tx, volumeID, c.id, volumeConfig)
+	err = StorageVolumeConfigAdd(tx, volumeID, c.nodeID, volumeConfig)
 	if err != nil {
 		tx.Rollback()
 		return -1, err
diff --git a/lxd/db/storage_volumes.go b/lxd/db/storage_volumes.go
index 95d164234..e40bc11cb 100644
--- a/lxd/db/storage_volumes.go
+++ b/lxd/db/storage_volumes.go
@@ -12,7 +12,7 @@ import (
 func (c *Cluster) StorageVolumeConfigGet(volumeID int64) (map[string]string, error) {
 	var key, value string
 	query := "SELECT key, value FROM storage_volumes_config WHERE storage_volume_id=? AND node_id=?"
-	inargs := []interface{}{volumeID, c.id}
+	inargs := []interface{}{volumeID, c.nodeID}
 	outargs := []interface{}{key, value}
 
 	results, err := queryScan(c.db, query, inargs, outargs)
diff --git a/lxd/db/testing.go b/lxd/db/testing.go
index 9f819f5b0..0950156a3 100644
--- a/lxd/db/testing.go
+++ b/lxd/db/testing.go
@@ -74,7 +74,7 @@ func NewTestClusterTx(t *testing.T) (*ClusterTx, func()) {
 
 	var err error
 
-	clusterTx := &ClusterTx{}
+	clusterTx := &ClusterTx{nodeID: cluster.nodeID}
 	clusterTx.tx, err = cluster.db.Begin()
 	require.NoError(t, err)
 
diff --git a/lxd/db/transaction.go b/lxd/db/transaction.go
index de30c11f7..8220bf8d5 100644
--- a/lxd/db/transaction.go
+++ b/lxd/db/transaction.go
@@ -25,5 +25,6 @@ func (n *NodeTx) Tx() *sql.Tx {
 // It wraps low-level sql.Tx objects and offers a high-level API to fetch and
 // update data.
 type ClusterTx struct {
-	tx *sql.Tx // Handle to a transaction in the cluster dqlite database.
+	tx     *sql.Tx // Handle to a transaction in the cluster dqlite database.
+	nodeID int64   // Node ID of this LXD instance.
 }

From 3c3f3bc2b931e62658d31769eb733b17487e04e2 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 15 Nov 2017 11:20:07 +0000
Subject: [PATCH 088/227] Track operations in the cluster database table

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 client/interfaces.go      |  2 +-
 client/lxd_cluster.go     |  9 +++------
 lxc/cluster.go            |  7 +------
 lxd/api_cluster.go        | 14 +++++---------
 lxd/api_cluster_test.go   |  3 +--
 lxd/cluster/membership.go | 17 ++++++++++++++++-
 lxd/container_console.go  |  2 +-
 lxd/container_delete.go   |  2 +-
 lxd/container_exec.go     |  4 ++--
 lxd/container_post.go     |  6 +++---
 lxd/container_put.go      |  2 +-
 lxd/container_snapshot.go | 11 ++++++-----
 lxd/container_state.go    |  2 +-
 lxd/containers_post.go    | 10 +++++-----
 lxd/daemon_images_test.go |  2 +-
 lxd/images.go             |  8 ++++----
 lxd/migrate.go            |  3 ++-
 lxd/operations.go         | 22 +++++++++++++++++++++-
 18 files changed, 75 insertions(+), 51 deletions(-)

diff --git a/client/interfaces.go b/client/interfaces.go
index e67a2b59e..783faba73 100644
--- a/client/interfaces.go
+++ b/client/interfaces.go
@@ -166,7 +166,7 @@ type ContainerServer interface {
 	BootstrapCluster(name string) (op *Operation, err error)
 	AcceptNode(targetPassword, name, address string, schema, api int) (info *api.ClusterNodeAccepted, err error)
 	JoinCluster(targetAddress, targetPassword, targetCert, name string) (op *Operation, err error)
-	LeaveCluster(name string, force bool) (op *Operation, err error)
+	LeaveCluster(name string, force bool) (err error)
 	GetNodes() (nodes []api.Node, err error)
 	GetNode(name string) (node *api.Node, err error)
 
diff --git a/client/lxd_cluster.go b/client/lxd_cluster.go
index 1bb302dc9..3c12da1d0 100644
--- a/client/lxd_cluster.go
+++ b/client/lxd_cluster.go
@@ -72,17 +72,14 @@ func (r *ProtocolLXD) JoinCluster(targetAddress, targetPassword, targetCert, nam
 
 // LeaveCluster makes the given node leave the cluster (gracefully or not,
 // depending on the force flag).
-func (r *ProtocolLXD) LeaveCluster(name string, force bool) (*Operation, error) {
+func (r *ProtocolLXD) LeaveCluster(name string, force bool) error {
 	params := ""
 	if force {
 		params += "?force=1"
 	}
 	url := fmt.Sprintf("/cluster/nodes/%s%s", name, params)
-	op, _, err := r.queryOperation("DELETE", url, nil, "")
-	if err != nil {
-		return nil, err
-	}
-	return op, nil
+	_, err := r.queryStruct("DELETE", url, nil, "", nil)
+	return err
 }
 
 // GetNodes returns the current nodes in the cluster.
diff --git a/lxc/cluster.go b/lxc/cluster.go
index bca261ed6..041bb7c7f 100644
--- a/lxc/cluster.go
+++ b/lxc/cluster.go
@@ -108,16 +108,11 @@ func (c *clusterCmd) doClusterNodeDelete(conf *config.Config, args []string) err
 		return err
 	}
 
-	op, err := client.LeaveCluster(name, c.force)
+	err = client.LeaveCluster(name, c.force)
 	if err != nil {
 		return err
 	}
 
-	err = op.Wait()
-	if err != nil {
-		return nil
-	}
-
 	fmt.Printf(i18n.G("Node %s removed")+"\n", name)
 	return nil
 }
diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index 24e3776bd..30de8bb4d 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -157,7 +157,7 @@ func clusterNodesPostBootstrap(d *Daemon, req api.ClusterPost) Response {
 	resources := map[string][]string{}
 	resources["cluster"] = []string{}
 
-	op, err := operationCreate(operationClassTask, resources, nil, run, nil, nil)
+	op, err := operationCreate(d.cluster, operationClassTask, resources, nil, run, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -267,7 +267,7 @@ func clusterNodesPostJoin(d *Daemon, req api.ClusterPost) Response {
 	resources := map[string][]string{}
 	resources["cluster"] = []string{}
 
-	op, err := operationCreate(operationClassTask, resources, nil, run, nil, nil)
+	op, err := operationCreate(d.cluster, operationClassTask, resources, nil, run, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -379,13 +379,9 @@ func clusterNodeDelete(d *Daemon, r *http.Request) Response {
 		}
 	}
 
-	resources := map[string][]string{}
-	resources["cluster"] = []string{}
-
-	op, err := operationCreate(operationClassTask, resources, nil, run, nil, nil)
+	err = run(nil)
 	if err != nil {
-		return InternalError(err)
+		return SmartError(err)
 	}
-
-	return OperationResponse(op)
+	return EmptySyncResponse
 }
diff --git a/lxd/api_cluster_test.go b/lxd/api_cluster_test.go
index 8d7c3a944..a6d18e7cf 100644
--- a/lxd/api_cluster_test.go
+++ b/lxd/api_cluster_test.go
@@ -162,9 +162,8 @@ func TestCluster_Leave(t *testing.T) {
 	f.FormCluster(daemons)
 
 	client := f.ClientUnix(daemons[1])
-	op, err := client.LeaveCluster("rusp-0", false)
+	err := client.LeaveCluster("rusp-0", false)
 	require.NoError(t, err)
-	assert.NoError(t, op.Wait())
 }
 
 // Test helper for cluster-related APIs.
diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index 00edaf924..db1a0efc6 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -229,9 +229,12 @@ func Join(state *state.State, gateway *Gateway, cert *shared.CertInfo, name stri
 
 	// Get the local config keys for the cluster networks. It assumes that
 	// the local storage pools and networks match the cluster networks, if
-	// not an error will be returned.
+	// not, an error will be returned. Also get any outstanding operations;
+	// typically there will be just one, created by the POST /cluster/nodes
+	// request which triggered this code.
 	var pools map[string]map[string]string
 	var networks map[string]map[string]string
+	var operations []string
 	err = state.Cluster.Transaction(func(tx *db.ClusterTx) error {
 		pools, err = tx.StoragePoolConfigs()
 		if err != nil {
@@ -241,6 +244,10 @@ func Join(state *state.State, gateway *Gateway, cert *shared.CertInfo, name stri
 		if err != nil {
 			return err
 		}
+		operations, err = tx.OperationsUUIDs()
+		if err != nil {
+			return err
+		}
 		return nil
 	})
 	if err != nil {
@@ -339,6 +346,14 @@ func Join(state *state.State, gateway *Gateway, cert *shared.CertInfo, name stri
 				return errors.Wrap(err, "failed to add joining node's network config")
 			}
 		}
+
+		// Migrate outstanding operations.
+		for _, uuid := range operations {
+			_, err := tx.OperationAdd(uuid)
+			if err != nil {
+				return err
+			}
+		}
 		return nil
 	})
 	if err != nil {
diff --git a/lxd/container_console.go b/lxd/container_console.go
index 3f1216dfb..5e11e3255 100644
--- a/lxd/container_console.go
+++ b/lxd/container_console.go
@@ -310,7 +310,7 @@ func containerConsolePost(d *Daemon, r *http.Request) Response {
 	resources := map[string][]string{}
 	resources["containers"] = []string{ws.container.Name()}
 
-	op, err := operationCreate(operationClassWebsocket, resources,
+	op, err := operationCreate(d.cluster, operationClassWebsocket, resources,
 		ws.Metadata(), ws.Do, nil, ws.Connect)
 	if err != nil {
 		return InternalError(err)
diff --git a/lxd/container_delete.go b/lxd/container_delete.go
index a98e6051d..c0226f349 100644
--- a/lxd/container_delete.go
+++ b/lxd/container_delete.go
@@ -25,7 +25,7 @@ func containerDelete(d *Daemon, r *http.Request) Response {
 	resources := map[string][]string{}
 	resources["containers"] = []string{name}
 
-	op, err := operationCreate(operationClassTask, resources, nil, rmct, nil, nil)
+	op, err := operationCreate(d.cluster, operationClassTask, resources, nil, rmct, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
diff --git a/lxd/container_exec.go b/lxd/container_exec.go
index f75dc348a..587312406 100644
--- a/lxd/container_exec.go
+++ b/lxd/container_exec.go
@@ -435,7 +435,7 @@ func containerExecPost(d *Daemon, r *http.Request) Response {
 		resources := map[string][]string{}
 		resources["containers"] = []string{ws.container.Name()}
 
-		op, err := operationCreate(operationClassWebsocket, resources, ws.Metadata(), ws.Do, nil, ws.Connect)
+		op, err := operationCreate(d.cluster, operationClassWebsocket, resources, ws.Metadata(), ws.Do, nil, ws.Connect)
 		if err != nil {
 			return InternalError(err)
 		}
@@ -487,7 +487,7 @@ func containerExecPost(d *Daemon, r *http.Request) Response {
 	resources := map[string][]string{}
 	resources["containers"] = []string{name}
 
-	op, err := operationCreate(operationClassTask, resources, nil, run, nil, nil)
+	op, err := operationCreate(d.cluster, operationClassTask, resources, nil, run, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
diff --git a/lxd/container_post.go b/lxd/container_post.go
index 25e1fd04b..7c18c5c38 100644
--- a/lxd/container_post.go
+++ b/lxd/container_post.go
@@ -62,7 +62,7 @@ func containerPost(d *Daemon, r *http.Request) Response {
 				return InternalError(err)
 			}
 
-			op, err := operationCreate(operationClassTask, resources, nil, ws.Do, nil, nil)
+			op, err := operationCreate(d.cluster, operationClassTask, resources, nil, ws.Do, nil, nil)
 			if err != nil {
 				return InternalError(err)
 			}
@@ -71,7 +71,7 @@ func containerPost(d *Daemon, r *http.Request) Response {
 		}
 
 		// Pull mode
-		op, err := operationCreate(operationClassWebsocket, resources, ws.Metadata(), ws.Do, nil, ws.Connect)
+		op, err := operationCreate(d.cluster, operationClassWebsocket, resources, ws.Metadata(), ws.Do, nil, ws.Connect)
 		if err != nil {
 			return InternalError(err)
 		}
@@ -92,7 +92,7 @@ func containerPost(d *Daemon, r *http.Request) Response {
 	resources := map[string][]string{}
 	resources["containers"] = []string{name}
 
-	op, err := operationCreate(operationClassTask, resources, nil, run, nil, nil)
+	op, err := operationCreate(d.cluster, operationClassTask, resources, nil, run, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
diff --git a/lxd/container_put.go b/lxd/container_put.go
index 02a05c643..93cba1d0b 100644
--- a/lxd/container_put.go
+++ b/lxd/container_put.go
@@ -75,7 +75,7 @@ func containerPut(d *Daemon, r *http.Request) Response {
 	resources := map[string][]string{}
 	resources["containers"] = []string{name}
 
-	op, err := operationCreate(operationClassTask, resources, nil, do, nil, nil)
+	op, err := operationCreate(d.cluster, operationClassTask, resources, nil, do, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
diff --git a/lxd/container_snapshot.go b/lxd/container_snapshot.go
index d2c711986..13cb62412 100644
--- a/lxd/container_snapshot.go
+++ b/lxd/container_snapshot.go
@@ -126,7 +126,7 @@ func containerSnapshotsPost(d *Daemon, r *http.Request) Response {
 	resources := map[string][]string{}
 	resources["containers"] = []string{name}
 
-	op, err := operationCreate(operationClassTask, resources, nil, snapshot, nil, nil)
+	op, err := operationCreate(d.cluster, operationClassTask, resources, nil, snapshot, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -228,7 +228,7 @@ func snapshotPost(d *Daemon, r *http.Request, sc container, containerName string
 				return InternalError(err)
 			}
 
-			op, err := operationCreate(operationClassTask, resources, nil, ws.Do, nil, nil)
+			op, err := operationCreate(d.cluster, operationClassTask, resources, nil, ws.Do, nil, nil)
 			if err != nil {
 				return InternalError(err)
 			}
@@ -237,7 +237,7 @@ func snapshotPost(d *Daemon, r *http.Request, sc container, containerName string
 		}
 
 		// Pull mode
-		op, err := operationCreate(operationClassWebsocket, resources, ws.Metadata(), ws.Do, nil, ws.Connect)
+		op, err := operationCreate(d.cluster, operationClassWebsocket, resources, ws.Metadata(), ws.Do, nil, ws.Connect)
 		if err != nil {
 			return InternalError(err)
 		}
@@ -270,7 +270,7 @@ func snapshotPost(d *Daemon, r *http.Request, sc container, containerName string
 	resources := map[string][]string{}
 	resources["containers"] = []string{containerName}
 
-	op, err := operationCreate(operationClassTask, resources, nil, rename, nil, nil)
+	op, err := operationCreate(d.cluster, operationClassTask, resources, nil, rename, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -286,7 +286,8 @@ func snapshotDelete(sc container, name string) Response {
 	resources := map[string][]string{}
 	resources["containers"] = []string{sc.Name()}
 
-	op, err := operationCreate(operationClassTask, resources, nil, remove, nil, nil)
+	state := sc.DaemonState()
+	op, err := operationCreate(state.Cluster, operationClassTask, resources, nil, remove, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
diff --git a/lxd/container_state.go b/lxd/container_state.go
index 039b8fdb0..306fbca74 100644
--- a/lxd/container_state.go
+++ b/lxd/container_state.go
@@ -159,7 +159,7 @@ func containerStatePut(d *Daemon, r *http.Request) Response {
 	resources := map[string][]string{}
 	resources["containers"] = []string{name}
 
-	op, err := operationCreate(operationClassTask, resources, nil, do, nil, nil)
+	op, err := operationCreate(d.cluster, operationClassTask, resources, nil, do, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
diff --git a/lxd/containers_post.go b/lxd/containers_post.go
index 666a3fbc2..5f33235b8 100644
--- a/lxd/containers_post.go
+++ b/lxd/containers_post.go
@@ -126,7 +126,7 @@ func createFromImage(d *Daemon, req *api.ContainersPost) Response {
 	resources := map[string][]string{}
 	resources["containers"] = []string{req.Name}
 
-	op, err := operationCreate(operationClassTask, resources, nil, run, nil, nil)
+	op, err := operationCreate(d.cluster, operationClassTask, resources, nil, run, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -160,7 +160,7 @@ func createFromNone(d *Daemon, req *api.ContainersPost) Response {
 	resources := map[string][]string{}
 	resources["containers"] = []string{req.Name}
 
-	op, err := operationCreate(operationClassTask, resources, nil, run, nil, nil)
+	op, err := operationCreate(d.cluster, operationClassTask, resources, nil, run, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -407,12 +407,12 @@ func createFromMigration(d *Daemon, req *api.ContainersPost) Response {
 
 	var op *operation
 	if push {
-		op, err = operationCreate(operationClassWebsocket, resources, sink.Metadata(), run, nil, sink.Connect)
+		op, err = operationCreate(d.cluster, operationClassWebsocket, resources, sink.Metadata(), run, nil, sink.Connect)
 		if err != nil {
 			return InternalError(err)
 		}
 	} else {
-		op, err = operationCreate(operationClassTask, resources, nil, run, nil, nil)
+		op, err = operationCreate(d.cluster, operationClassTask, resources, nil, run, nil, nil)
 		if err != nil {
 			return InternalError(err)
 		}
@@ -507,7 +507,7 @@ func createFromCopy(d *Daemon, req *api.ContainersPost) Response {
 	resources := map[string][]string{}
 	resources["containers"] = []string{req.Name, req.Source.Source}
 
-	op, err := operationCreate(operationClassTask, resources, nil, run, nil, nil)
+	op, err := operationCreate(d.cluster, operationClassTask, resources, nil, run, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
diff --git a/lxd/daemon_images_test.go b/lxd/daemon_images_test.go
index 68683dde7..cc91165f6 100644
--- a/lxd/daemon_images_test.go
+++ b/lxd/daemon_images_test.go
@@ -39,7 +39,7 @@ func (suite *daemonImagesTestSuite) TestUseCachedImagesIfAvailable() {
 
 	// Request an image with alias "test" and check that it's the
 	// one we created above.
-	op, err := operationCreate(operationClassTask, map[string][]string{}, nil, nil, nil, nil)
+	op, err := operationCreate(suite.d.cluster, operationClassTask, map[string][]string{}, nil, nil, nil, nil)
 	suite.Req.Nil(err)
 	image, err := suite.d.ImageDownload(op, "img.srv", "simplestreams", "", "", "test", false, false, "", true)
 	suite.Req.Nil(err)
diff --git a/lxd/images.go b/lxd/images.go
index 1dba3e4a0..420ec69c9 100644
--- a/lxd/images.go
+++ b/lxd/images.go
@@ -661,7 +661,7 @@ func imagesPost(d *Daemon, r *http.Request) Response {
 		return nil
 	}
 
-	op, err := operationCreate(operationClassTask, nil, nil, run, nil, nil)
+	op, err := operationCreate(d.cluster, operationClassTask, nil, nil, run, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -1133,7 +1133,7 @@ func imageDelete(d *Daemon, r *http.Request) Response {
 	resources := map[string][]string{}
 	resources["images"] = []string{fingerprint}
 
-	op, err := operationCreate(operationClassTask, resources, nil, rmimg, nil, nil)
+	op, err := operationCreate(d.cluster, operationClassTask, resources, nil, rmimg, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -1567,7 +1567,7 @@ func imageSecret(d *Daemon, r *http.Request) Response {
 	resources := map[string][]string{}
 	resources["images"] = []string{imgInfo.Fingerprint}
 
-	op, err := operationCreate(operationClassToken, resources, meta, nil, nil, nil)
+	op, err := operationCreate(d.cluster, operationClassToken, resources, meta, nil, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -1587,7 +1587,7 @@ func imageRefresh(d *Daemon, r *http.Request) Response {
 		return autoUpdateImage(d, op, imageId, imageInfo)
 	}
 
-	op, err := operationCreate(operationClassTask, nil, nil, run, nil, nil)
+	op, err := operationCreate(d.cluster, operationClassTask, nil, nil, run, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
diff --git a/lxd/migrate.go b/lxd/migrate.go
index 6bedadcb1..c892a7da6 100644
--- a/lxd/migrate.go
+++ b/lxd/migrate.go
@@ -726,7 +726,9 @@ func (s *migrationSourceWs) Do(migrateOp *operation) error {
 				return abort(err)
 			}
 
+			state := s.container.DaemonState()
 			actionScriptOp, err := operationCreate(
+				state.Cluster,
 				operationClassWebsocket,
 				nil,
 				nil,
@@ -764,7 +766,6 @@ func (s *migrationSourceWs) Do(migrateOp *operation) error {
 				return abort(err)
 			}
 
-			state := s.container.DaemonState()
 			err = writeActionScript(checkpointDir, actionScriptOp.url, actionScriptOpSecret, state.OS.ExecPath)
 			if err != nil {
 				os.RemoveAll(checkpointDir)
diff --git a/lxd/operations.go b/lxd/operations.go
index dc0388fbd..dd428422a 100644
--- a/lxd/operations.go
+++ b/lxd/operations.go
@@ -10,7 +10,9 @@ import (
 
 	"github.com/gorilla/mux"
 	"github.com/pborman/uuid"
+	"github.com/pkg/errors"
 
+	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
@@ -61,6 +63,8 @@ type operation struct {
 
 	// Locking for concurrent access to the operation
 	lock sync.Mutex
+
+	cluster *db.Cluster
 }
 
 func (op *operation) done() {
@@ -87,6 +91,13 @@ func (op *operation) done() {
 		delete(operations, op.id)
 		operationsLock.Unlock()
 
+		err := op.cluster.Transaction(func(tx *db.ClusterTx) error {
+			return tx.OperationRemove(op.id)
+		})
+		if err != nil {
+			logger.Warnf("Failed to delete operation %s: %s", op.id, err)
+		}
+
 		/*
 		 * When we create a new lxc.Container, it adds a finalizer (via
 		 * SetFinalizer) that frees the struct. However, it sometimes
@@ -372,7 +383,7 @@ func (op *operation) UpdateMetadata(opMetadata interface{}) error {
 	return nil
 }
 
-func operationCreate(opClass operationClass, opResources map[string][]string, opMetadata interface{}, onRun func(*operation) error, onCancel func(*operation) error, onConnect func(*operation, *http.Request, http.ResponseWriter) error) (*operation, error) {
+func operationCreate(cluster *db.Cluster, opClass operationClass, opResources map[string][]string, opMetadata interface{}, onRun func(*operation) error, onCancel func(*operation) error, onConnect func(*operation, *http.Request, http.ResponseWriter) error) (*operation, error) {
 	// Main attributes
 	op := operation{}
 	op.id = uuid.NewRandom().String()
@@ -383,6 +394,7 @@ func operationCreate(opClass operationClass, opResources map[string][]string, op
 	op.url = fmt.Sprintf("/%s/operations/%s", version.APIVersion, op.id)
 	op.resources = opResources
 	op.chanDone = make(chan error)
+	op.cluster = cluster
 
 	newMetadata, err := shared.ParseMetadata(opMetadata)
 	if err != nil {
@@ -416,6 +428,14 @@ func operationCreate(opClass operationClass, opResources map[string][]string, op
 	operations[op.id] = &op
 	operationsLock.Unlock()
 
+	err = op.cluster.Transaction(func(tx *db.ClusterTx) error {
+		_, err := tx.OperationAdd(op.id)
+		return err
+	})
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to add operation to database")
+	}
+
 	logger.Debugf("New %s operation: %s", op.class.String(), op.id)
 	_, md, _ := op.Render()
 	eventSend("operation", md)

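A note on the lifecycle introduced above: an operation now lives exactly as long as its row in the cluster database. operationCreate inserts the row so peers can discover the operation by UUID, and done() deletes it again. A minimal sketch of that pairing, reusing the Transaction/OperationAdd/OperationRemove helpers from this patch (the wrapper function itself is hypothetical):

    package sketch

    import (
        "github.com/lxc/lxd/lxd/db"
        "github.com/lxc/lxd/shared/logger"
        "github.com/pkg/errors"
    )

    // withTrackedOperation runs fn while the operation identified by id is
    // recorded in the cluster database, mirroring operationCreate/op.done().
    func withTrackedOperation(cluster *db.Cluster, id string, fn func() error) error {
        err := cluster.Transaction(func(tx *db.ClusterTx) error {
            _, err := tx.OperationAdd(id)
            return err
        })
        if err != nil {
            return errors.Wrap(err, "failed to add operation to database")
        }
        defer func() {
            err := cluster.Transaction(func(tx *db.ClusterTx) error {
                return tx.OperationRemove(id)
            })
            if err != nil {
                logger.Warnf("Failed to delete operation %s: %s", id, err)
            }
        }()
        return fn()
    }
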
From 784595b4f43890e8e19f6d2e03af809e6443df55 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 16 Nov 2017 12:00:22 +0000
Subject: [PATCH 089/227] Add cluster.Events task for watching events from
 other nodes

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/events.go | 104 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 104 insertions(+)
 create mode 100644 lxd/cluster/events.go

diff --git a/lxd/cluster/events.go b/lxd/cluster/events.go
new file mode 100644
index 000000000..fe02df4f7
--- /dev/null
+++ b/lxd/cluster/events.go
@@ -0,0 +1,104 @@
+package cluster
+
+import (
+	"fmt"
+	"time"
+
+	lxd "github.com/lxc/lxd/client"
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/endpoints"
+	"github.com/lxc/lxd/lxd/task"
+	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/logger"
+	"golang.org/x/net/context"
+)
+
+// Events starts a task that continuously monitors the list of cluster nodes and
+// maintains a pool of websocket connections against all of them, in order to
+// get notified about events.
+//
+// Whenever an event is received the given callback is invoked.
+func Events(endpoints *endpoints.Endpoints, cluster *db.Cluster, f func(int64, interface{})) (task.Func, task.Schedule) {
+	listeners := map[int64]*lxd.EventListener{}
+
+	// Update our pool of event listeners.
+	update := func(ctx context.Context) {
+		// Get the current cluster nodes.
+		var nodes []db.NodeInfo
+		err := cluster.Transaction(func(tx *db.ClusterTx) error {
+			var err error
+			nodes, err = tx.Nodes()
+			return err
+		})
+		if err != nil {
+			logger.Warnf("Failed to get current cluster nodes: %v", err)
+			return
+		}
+		if len(nodes) == 1 {
+			return // Either we're not clustered or this is a single-node cluster
+		}
+
+		address := endpoints.NetworkAddress()
+
+		ids := make([]int, len(nodes))
+		for i, node := range nodes {
+			ids[i] = int(node.ID)
+
+			// Don't bother trying to connect to offline nodes, or to ourselves.
+			if node.IsDown() || node.Address == address {
+				continue
+			}
+
+			_, ok := listeners[node.ID]
+
+			// The node already has a listener associated with it.
+			if ok {
+				// Double check that the listener is still
+				// connected. If it is, just move on; otherwise
+				// we'll try to connect again.
+				if listeners[node.ID].Active() {
+					continue
+				}
+				delete(listeners, node.ID)
+			}
+
+			listener, err := eventsConnect(node.Address, endpoints.NetworkCert())
+			if err != nil {
+				logger.Warnf("Failed to get events from node %s: %v", node.Address, err)
+				continue
+			}
+			logger.Debugf("Listening for events on node %s", node.Address)
+			listener.AddHandler(nil, func(event interface{}) { f(node.ID, event) })
+			listeners[node.ID] = listener
+		}
+		for id, listener := range listeners {
+			if !shared.IntInSlice(int(id), ids) {
+				listener.Disconnect()
+				delete(listeners, id)
+			}
+		}
+	}
+
+	schedule := task.Every(time.Second)
+
+	return update, schedule
+}
+
+// Establish a client connection to get events from the given node.
+func eventsConnect(address string, cert *shared.CertInfo) (*lxd.EventListener, error) {
+	args := &lxd.ConnectionArgs{
+		TLSServerCert: string(cert.PublicKey()),
+		TLSClientCert: string(cert.PublicKey()),
+		TLSClientKey:  string(cert.PrivateKey()),
+		// Use a special user agent to let the events API handler know that
+		// it should only notify us of local events.
+		UserAgent: "lxd-cluster-notifier",
+	}
+
+	url := fmt.Sprintf("https://%s", address)
+	client, err := lxd.ConnectLXD(url, args)
+	if err != nil {
+		return nil, err
+	}
+	return client.GetEvents()
+}

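The update closure above is a reconciliation loop: on every tick it compares the pool of event listeners against the nodes table, skipping itself and offline nodes, re-dialing listeners that went inactive, and pruning listeners for nodes that left the cluster. One caveat worth noting: the handler closure f(node.ID, event) captures the range variable node, so a per-iteration copy (nodeID := node.ID) would be safer. The reconciliation pattern in isolation, with a dial parameter standing in for eventsConnect:

    package sketch

    import (
        lxd "github.com/lxc/lxd/client"
        "github.com/lxc/lxd/lxd/db"
    )

    // reconcile keeps exactly one active listener per remote cluster node.
    func reconcile(pool map[int64]*lxd.EventListener, nodes []db.NodeInfo,
        self string, dial func(address string) (*lxd.EventListener, error)) {
        seen := map[int64]bool{}
        for _, node := range nodes {
            seen[node.ID] = true
            if node.IsDown() || node.Address == self {
                continue // Never dial offline nodes or ourselves.
            }
            if l, ok := pool[node.ID]; ok && l.Active() {
                continue // The existing listener is still connected.
            }
            delete(pool, node.ID) // Drop the stale entry, if any.
            if l, err := dial(node.Address); err == nil {
                pool[node.ID] = l
            }
        }
        for id, l := range pool {
            if !seen[id] { // The node left the cluster.
                l.Disconnect()
                delete(pool, id)
            }
        }
    }
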
From 83cc6a6476560d4e77b462215c7f1897ef4b4b0e Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 16 Nov 2017 12:00:55 +0000
Subject: [PATCH 090/227] Make nodes forward events received from other nodes

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 client/events.go         | 10 ++++++++++
 lxd/cluster/heartbeat.go |  4 ++++
 lxd/daemon.go            |  3 +++
 lxd/events.go            | 34 ++++++++++++++++++++++++++++++++--
 4 files changed, 49 insertions(+), 2 deletions(-)

diff --git a/client/events.go b/client/events.go
index 9738505c1..a05bce83f 100644
--- a/client/events.go
+++ b/client/events.go
@@ -98,3 +98,13 @@ func (e *EventListener) Wait() error {
 	<-e.chActive
 	return e.err
 }
+
+// Active returns true if this listener is still connected, false otherwise.
+func (e *EventListener) Active() bool {
+	select {
+	case <-e.chActive:
+		return false // If the chActive channel is closed we got disconnected
+	default:
+		return true
+	}
+}
diff --git a/lxd/cluster/heartbeat.go b/lxd/cluster/heartbeat.go
index 798d81bd9..365f795d1 100644
--- a/lxd/cluster/heartbeat.go
+++ b/lxd/cluster/heartbeat.go
@@ -44,6 +44,10 @@ func Heartbeat(gateway *Gateway, cluster *db.Cluster) (task.Func, task.Schedule)
 			nodes, err = tx.Nodes()
 			return err
 		})
+		if err != nil {
+			logger.Warnf("Failed to get current cluster nodes: %v", err)
+			return
+		}
 		wg := sync.WaitGroup{}
 		wg.Add(len(nodes))
 		heartbeats := make([]time.Time, len(nodes))
diff --git a/lxd/daemon.go b/lxd/daemon.go
index 5cc19569f..0a41e0fd2 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -530,6 +530,9 @@ func (d *Daemon) Ready() error {
 	/* Heartbeats */
 	d.tasks.Add(cluster.Heartbeat(d.gateway, d.cluster))
 
+	/* Events */
+	d.tasks.Add(cluster.Events(d.endpoints, d.cluster, eventForward))
+
 	// FIXME: There's no hard reason for which we should not run these
 	//        tasks in mock mode. However it requires that we tweak them so
 	//        they exit gracefully without blocking (something we should do
diff --git a/lxd/events.go b/lxd/events.go
index f79b03fcc..a8749b6fd 100644
--- a/lxd/events.go
+++ b/lxd/events.go
@@ -53,6 +53,11 @@ type eventListener struct {
 	id           string
 	lock         sync.Mutex
 	done         bool
+
+	// If true, this listener won't get events forwarded from other
+	// nodes. It's only used by listeners created internally by LXD nodes
+	// connecting to other LXD nodes to get their local events only.
+	noForward bool
 }
 
 type eventsServe struct {
@@ -85,6 +90,11 @@ func eventsSocket(r *http.Request, w http.ResponseWriter) error {
 		messageTypes: strings.Split(typeStr, ","),
 	}
 
+	// If this request is an internal one initiated by another node wanting
+	// to watch the events on this node, flag the listener so that it only
+	// receives local events.
+	listener.noForward = isClusterNotification(r)
+
 	eventsLock.Lock()
 	eventListeners[listener.id] = &listener
 	eventsLock.Unlock()
@@ -97,7 +107,7 @@ func eventsSocket(r *http.Request, w http.ResponseWriter) error {
 }
 
 func eventsGet(d *Daemon, r *http.Request) Response {
-	return &eventsServe{r}
+	return &eventsServe{req: r}
 }
 
 var eventsCmd = Command{name: "events", get: eventsGet}
@@ -108,15 +118,24 @@ func eventSend(eventType string, eventMessage interface{}) error {
 	event["timestamp"] = time.Now()
 	event["metadata"] = eventMessage
 
+	return eventBroadcast(event)
+}
+
+func eventBroadcast(event shared.Jmap) error {
 	body, err := json.Marshal(event)
 	if err != nil {
 		return err
 	}
 
+	_, isForward := event["node"]
 	eventsLock.Lock()
 	listeners := eventListeners
 	for _, listener := range listeners {
-		if !shared.StringInSlice(eventType, listener.messageTypes) {
+		if isForward && listener.noForward {
+			continue
+		}
+
+		if !shared.StringInSlice(event["type"].(string), listener.messageTypes) {
 			continue
 		}
 
@@ -154,3 +173,14 @@ func eventSend(eventType string, eventMessage interface{}) error {
 
 	return nil
 }
+
+// Forward an event received from another node to the local events dispatcher.
+func eventForward(id int64, data interface{}) {
+	event := data.(map[string]interface{})
+	event["node"] = id
+
+	err := eventBroadcast(event)
+	if err != nil {
+		logger.Warnf("Failed to forward event from node %d: %v", id, err)
+	}
+}

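The forwarding rules are compact: eventForward stamps a "node" field on anything received from a peer, and listeners flagged noForward (peers watching us) never receive stamped events, so an event travels at most one hop and cannot bounce between nodes. The delivery decision as a standalone predicate (hypothetical helper, same logic as the broadcast loop):

    package sketch

    // shouldDeliver reports whether an event may be sent to a listener under
    // the one-hop forwarding rule: events stamped with a "node" field were
    // forwarded from a peer and must not be re-sent to peer listeners.
    func shouldDeliver(event map[string]interface{}, noForward bool) bool {
        _, forwarded := event["node"]
        return !(forwarded && noForward)
    }
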
From b81faeaeb5e500321e521e13bf874d4a09b2240b Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 17 Nov 2017 09:32:41 +0000
Subject: [PATCH 091/227] Change GET /operations/<uuid> to return non-local ops
 on other nodes

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/operations.go | 44 +++++++++++++++++++++++++++++++++++++-------
 1 file changed, 37 insertions(+), 7 deletions(-)

diff --git a/lxd/operations.go b/lxd/operations.go
index dd428422a..cdc5deb75 100644
--- a/lxd/operations.go
+++ b/lxd/operations.go
@@ -12,6 +12,7 @@ import (
 	"github.com/pborman/uuid"
 	"github.com/pkg/errors"
 
+	lxd "github.com/lxc/lxd/client"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
@@ -459,14 +460,43 @@ func operationGet(id string) (*operation, error) {
 func operationAPIGet(d *Daemon, r *http.Request) Response {
 	id := mux.Vars(r)["id"]
 
-	op, err := operationGet(id)
-	if err != nil {
-		return NotFound
-	}
+	var body *api.Operation
 
-	_, body, err := op.Render()
-	if err != nil {
-		return SmartError(err)
+	// First check the local cache, then the cluster database table.
+	op, err := operationGet(id)
+	if err == nil {
+		_, body, err = op.Render()
+		if err != nil {
+			return SmartError(err)
+		}
+	} else {
+		var address string
+		err = d.cluster.Transaction(func(tx *db.ClusterTx) error {
+			operation, err := tx.OperationByUUID(id)
+			if err != nil {
+				return err
+			}
+			address = operation.NodeAddress
+			return nil
+		})
+		if err != nil {
+			return SmartError(err)
+		}
+		cert := d.endpoints.NetworkCert()
+		args := &lxd.ConnectionArgs{
+			TLSServerCert: string(cert.PublicKey()),
+			TLSClientCert: string(cert.PublicKey()),
+			TLSClientKey:  string(cert.PrivateKey()),
+		}
+		url := fmt.Sprintf("https://%s", address)
+		client, err := lxd.ConnectLXD(url, args)
+		if err != nil {
+			return SmartError(err)
+		}
+		body, _, err = client.GetOperation(id)
+		if err != nil {
+			return SmartError(err)
+		}
 	}
 
 	return SyncResponse(true, body)

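With this change any node can be asked about any operation: the handler checks the local in-memory map first and otherwise resolves the owning node's address from the operations table and proxies the GET over a cluster connection. A client-side usage sketch (the UUID is illustrative):

    package main

    import (
        "fmt"

        lxd "github.com/lxc/lxd/client"
    )

    func main() {
        // Connect to whichever node is local; "" selects the default socket.
        client, err := lxd.ConnectLXDUnix("", nil)
        if err != nil {
            panic(err)
        }

        // The operation may live on any cluster node; the daemon forwards
        // the lookup transparently.
        op, _, err := client.GetOperation("b4a3c1f0-0000-0000-0000-000000000000")
        if err != nil {
            panic(err)
        }
        fmt.Println(op.Status)
    }
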
From 0bf03c2b2ecf7437d1cf753791ed698ca4ecd2d5 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 17 Nov 2017 11:59:45 +0000
Subject: [PATCH 092/227] Support for lxd init --target <node>

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 client/interfaces.go      |  1 +
 client/lxd.go             |  3 +++
 client/lxd_containers.go  |  6 +++++-
 client/lxd_server.go      | 18 ++++++++++++++++++
 doc/api-extensions.md     |  4 ++++
 lxc/init.go               |  5 ++++-
 lxd/containers_post.go    | 38 ++++++++++++++++++++++++++++++++++++++
 lxd/response.go           | 37 +++++++++++++++++++++++++++++++++++++
 test/suites/clustering.sh | 36 ++++++++++++++++++++++++++++++++++++
 9 files changed, 146 insertions(+), 2 deletions(-)

diff --git a/client/interfaces.go b/client/interfaces.go
index 783faba73..82e3d7554 100644
--- a/client/interfaces.go
+++ b/client/interfaces.go
@@ -48,6 +48,7 @@ type ContainerServer interface {
 	UpdateServer(server api.ServerPut, ETag string) (err error)
 	HasExtension(extension string) (exists bool)
 	RequireAuthenticated(authenticated bool)
+	ClusterTargetNode(name string) ContainerServer
 
 	// Certificate functions
 	GetCertificateFingerprints() (fingerprints []string, err error)
diff --git a/client/lxd.go b/client/lxd.go
index 9e0be5200..4421258eb 100644
--- a/client/lxd.go
+++ b/client/lxd.go
@@ -35,6 +35,9 @@ type ProtocolLXD struct {
 	bakeryClient         *httpbakery.Client
 	bakeryInteractor     httpbakery.Interactor
 	requireAuthenticated bool
+
+	// Name of the node that node-specific operations will target.
+	targetNode string
 }
 
 // GetConnectionInfo returns the basic connection information used to interact with the server
diff --git a/client/lxd_containers.go b/client/lxd_containers.go
index 04585e135..ba3f52c52 100644
--- a/client/lxd_containers.go
+++ b/client/lxd_containers.go
@@ -71,7 +71,11 @@ func (r *ProtocolLXD) CreateContainer(container api.ContainersPost) (*Operation,
 	}
 
 	// Send the request
-	op, _, err := r.queryOperation("POST", "/containers", container, "")
+	path := "/containers"
+	if r.targetNode != "" {
+		path += fmt.Sprintf("?targetNode=%s", r.targetNode)
+	}
+	op, _, err := r.queryOperation("POST", path, container, "")
 	if err != nil {
 		return nil, err
 	}
diff --git a/client/lxd_server.go b/client/lxd_server.go
index c95d095de..84b401deb 100644
--- a/client/lxd_server.go
+++ b/client/lxd_server.go
@@ -77,3 +77,21 @@ func (r *ProtocolLXD) GetServerResources() (*api.Resources, error) {
 
 	return &resources, nil
 }
+
+// ClusterTargetNode returns a client that will target the given node for
+// node-specific operations such as creating containers, modifying storage
+// configuration etc.
+func (r *ProtocolLXD) ClusterTargetNode(name string) ContainerServer {
+	return &ProtocolLXD{
+		server:               r.server,
+		http:                 r.http,
+		httpCertificate:      r.httpCertificate,
+		httpHost:             r.httpHost,
+		httpProtocol:         r.httpProtocol,
+		httpUserAgent:        r.httpUserAgent,
+		bakeryClient:         r.bakeryClient,
+		bakeryInteractor:     r.bakeryInteractor,
+		requireAuthenticated: r.requireAuthenticated,
+		targetNode:           name,
+	}
+}
diff --git a/doc/api-extensions.md b/doc/api-extensions.md
index 785d12e47..d84940730 100644
--- a/doc/api-extensions.md
+++ b/doc/api-extensions.md
@@ -409,3 +409,7 @@ This includes the following new endpoints:
 
 * `GET /1.0/cluster/nodes/<name>` (see [RESTful API](rest-api.md) for details)
 * `DELETE /1.0/cluster/nodes/<name>` (see [RESTful API](rest-api.md) for details)
+
+The following existing endpoints have been modified:
+
+* `POST /1.0/containers` accepts a new `targetNode` query parameter
diff --git a/lxc/init.go b/lxc/init.go
index caa3ce84a..7e5ed14aa 100644
--- a/lxc/init.go
+++ b/lxc/init.go
@@ -67,6 +67,7 @@ type initCmd struct {
 	network      string
 	storagePool  string
 	instanceType string
+	target       string
 }
 
 func (c *initCmd) showByDefault() bool {
@@ -75,7 +76,7 @@ func (c *initCmd) showByDefault() bool {
 
 func (c *initCmd) usage() string {
 	return i18n.G(
-		`Usage: lxc init [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n <network>] [--storage|-s <pool>] [--type|-t <instance type>]
+		`Usage: lxc init [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n <network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target <node>]
 
 Create containers from images.
 
@@ -145,6 +146,7 @@ func (c *initCmd) flags() {
 	gnuflag.StringVar(&c.storagePool, "storage", "", i18n.G("Storage pool name"))
 	gnuflag.StringVar(&c.storagePool, "s", "", i18n.G("Storage pool name"))
 	gnuflag.StringVar(&c.instanceType, "t", "", i18n.G("Instance type"))
+	gnuflag.StringVar(&c.target, "target", "", i18n.G("Node name"))
 }
 
 func (c *initCmd) run(conf *config.Config, args []string) error {
@@ -180,6 +182,7 @@ func (c *initCmd) create(conf *config.Config, args []string) (lxd.ContainerServe
 	if err != nil {
 		return nil, "", err
 	}
+	d = d.ClusterTargetNode(c.target)
 
 	/*
 	 * initRequestedEmptyProfiles means user requested empty
diff --git a/lxd/containers_post.go b/lxd/containers_post.go
index 5f33235b8..66053f7ef 100644
--- a/lxd/containers_post.go
+++ b/lxd/containers_post.go
@@ -11,6 +11,7 @@ import (
 	"github.com/dustinkirkland/golang-petname"
 	"github.com/gorilla/websocket"
 
+	lxd "github.com/lxc/lxd/client"
 	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/types"
@@ -523,6 +524,43 @@ func containersPost(d *Daemon, r *http.Request) Response {
 		return BadRequest(err)
 	}
 
+	targetNode := r.FormValue("targetNode")
+	if targetNode != "" {
+		address := ""
+		err := d.cluster.Transaction(func(tx *db.ClusterTx) error {
+			node, err := tx.NodeByName(targetNode)
+			if err != nil {
+				return err
+			}
+			if node.Address != d.endpoints.NetworkAddress() {
+				address = node.Address
+			}
+			return nil
+		})
+		if err != nil {
+			return SmartError(err)
+		}
+		if address != "" {
+			cert := d.endpoints.NetworkCert()
+			args := &lxd.ConnectionArgs{
+				TLSServerCert: string(cert.PublicKey()),
+				TLSClientCert: string(cert.PublicKey()),
+				TLSClientKey:  string(cert.PrivateKey()),
+			}
+			url := fmt.Sprintf("https://%s", address)
+			client, err := lxd.ConnectLXD(url, args)
+			if err != nil {
+				return SmartError(err)
+			}
+			logger.Debugf("Forward container post request to %s", address)
+			op, err := client.CreateContainer(req)
+			if err != nil {
+				return SmartError(err)
+			}
+			return ForwardedOperationResponse(&op.Operation)
+		}
+	}
+
 	// If no storage pool is found, error out.
 	pools, err := d.cluster.StoragePools()
 	if err != nil || len(pools) == 0 {
diff --git a/lxd/response.go b/lxd/response.go
index 73d8540c1..9fb053412 100644
--- a/lxd/response.go
+++ b/lxd/response.go
@@ -17,6 +17,7 @@ import (
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
+	"github.com/lxc/lxd/shared/version"
 )
 
 type Response interface {
@@ -263,6 +264,42 @@ func OperationResponse(op *operation) Response {
 	return &operationResponse{op}
 }
 
+// Forwarded operation response.
+//
+// Returned when the operation has been created on another node
+type forwardedOperationResponse struct {
+	op *api.Operation
+}
+
+func (r *forwardedOperationResponse) Render(w http.ResponseWriter) error {
+	url := fmt.Sprintf("/%s/operations/%s", version.APIVersion, r.op.ID)
+
+	body := api.ResponseRaw{
+		Response: api.Response{
+			Type:       api.AsyncResponse,
+			Status:     api.OperationCreated.String(),
+			StatusCode: int(api.OperationCreated),
+			Operation:  url,
+		},
+		Metadata: r.op,
+	}
+
+	w.Header().Set("Location", url)
+	w.WriteHeader(202)
+
+	return util.WriteJSON(w, body, debug)
+}
+
+func (r *forwardedOperationResponse) String() string {
+	return r.op.ID
+}
+
+// ForwardedOperationResponse creates a response that forwards the metadata of
+// an operation created on another node.
+func ForwardedOperationResponse(op *api.Operation) Response {
+	return &forwardedOperationResponse{op}
+}
+
 // Error response
 type errorResponse struct {
 	code int
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 108a80e16..05535f9b6 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -96,3 +96,39 @@ test_clustering() {
   rm -f "${LXD_TWO_DIR}/unix.socket"
   rm -f "${LXD_ONE_DIR}/unix.socket"
 }
+
+test_clustering_containers() {
+  setup_clustering_bridge
+  prefix="lxd$$"
+  bridge="${prefix}"
+
+  setup_clustering_netns 1
+  LXD_ONE_DIR=$(mktemp -d -p "${TEST_DIR}" XXX)
+  chmod +x "${LXD_ONE_DIR}"
+  ns1="${prefix}1"
+  spawn_lxd_and_bootstrap_cluster "${ns1}" "${bridge}" "${LXD_ONE_DIR}"
+
+  # Add a newline at the end of each line. YAML has weird rules.
+  cert=$(sed ':a;N;$!ba;s/\n/\n\n/g' "${LXD_ONE_DIR}/server.crt")
+
+  # Spawn a second node
+  setup_clustering_netns 2
+  LXD_TWO_DIR=$(mktemp -d -p "${TEST_DIR}" XXX)
+  chmod +x "${LXD_TWO_DIR}"
+  ns2="${prefix}2"
+  spawn_lxd_and_join_cluster "${ns2}" "${bridge}" "${cert}" 2 1 "${LXD_TWO_DIR}"
+
+  # Init a container on node2, using a client connected to node1
+  LXD_DIR="${LXD_TWO_DIR}" ensure_import_testimage
+  LXD_DIR="${LXD_ONE_DIR}" lxc init --target node2 testimage foo
+  LXD_DIR="${LXD_TWO_DIR}" lxc list | grep -q foo
+
+  LXD_DIR="${LXD_TWO_DIR}" lxc network delete "${bridge}"
+
+  LXD_DIR="${LXD_TWO_DIR}" lxd shutdown
+  LXD_DIR="${LXD_ONE_DIR}" lxd shutdown
+  sleep 2
+  rm -f "${LXD_TWO_DIR}/unix.socket"
+  rm -f "${LXD_ONE_DIR}/unix.socket"
+}
+

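The client-side change is deliberately small: ClusterTargetNode returns a shallow copy of the connection whose container creation requests carry ?targetNode=<name>, which the receiving node either handles itself or forwards. A sketch of programmatic use, mirroring lxc init --target (container and node names are illustrative):

    package main

    import (
        lxd "github.com/lxc/lxd/client"
        "github.com/lxc/lxd/shared/api"
    )

    func main() {
        client, err := lxd.ConnectLXDUnix("", nil)
        if err != nil {
            panic(err)
        }

        // Target node2 for this request only; client itself is unchanged.
        target := client.ClusterTargetNode("node2")
        op, err := target.CreateContainer(api.ContainersPost{
            Name: "foo",
            Source: api.ContainerSource{
                Type:  "image",
                Alias: "testimage",
            },
        })
        if err != nil {
            panic(err)
        }
        if err := op.Wait(); err != nil {
            panic(err)
        }
    }
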
From 75b26930992eff2ca147186df521d4f40bb84ad7 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 20 Nov 2017 08:02:57 +0000
Subject: [PATCH 093/227] Include node name in GET /containers/<name> and lxc
 info <name>

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxc/info.go               |  3 +++
 lxd/container_lxc.go      |  5 +++++
 lxd/db/containers.go      | 20 ++++++++++++++++----
 shared/api/container.go   |  3 +++
 test/suites/clustering.sh |  1 +
 5 files changed, 28 insertions(+), 4 deletions(-)

diff --git a/lxc/info.go b/lxc/info.go
index fdb9f250d..7b89e1866 100644
--- a/lxc/info.go
+++ b/lxc/info.go
@@ -115,6 +115,9 @@ func (c *infoCmd) containerInfo(d lxd.ContainerServer, remote config.Remote, nam
 	const layout = "2006/01/02 15:04 UTC"
 
 	fmt.Printf(i18n.G("Name: %s")+"\n", ct.Name)
+	if ct.Node != "" {
+		fmt.Printf(i18n.G("Node: %s")+"\n", ct.Node)
+	}
 	if remote.Addr != "" {
 		fmt.Printf(i18n.G("Remote: %s")+"\n", remote.Addr)
 	}
diff --git a/lxd/container_lxc.go b/lxd/container_lxc.go
index 91a42faab..6c9a65347 100644
--- a/lxd/container_lxc.go
+++ b/lxd/container_lxc.go
@@ -458,6 +458,7 @@ func containerLXCLoad(s *state.State, args db.ContainerArgs) (container, error)
 		localConfig:  args.Config,
 		localDevices: args.Devices,
 		stateful:     args.Stateful,
+		node:         args.Node,
 	}
 
 	// Load the config.
@@ -499,6 +500,9 @@ type containerLXC struct {
 
 	// Storage
 	storage storage
+
+	// Clustering
+	node string
 }
 
 func (c *containerLXC) createOperation(action string, reusable bool, reuse bool) (*lxcContainerOperation, error) {
@@ -2837,6 +2841,7 @@ func (c *containerLXC) Render() (interface{}, interface{}, error) {
 			Name:            c.name,
 			Status:          statusCode.String(),
 			StatusCode:      statusCode,
+			Node:            c.node,
 		}
 
 		ct.Description = c.Description()
diff --git a/lxd/db/containers.go b/lxd/db/containers.go
index 1cd19a33f..c2ef39d0b 100644
--- a/lxd/db/containers.go
+++ b/lxd/db/containers.go
@@ -16,7 +16,8 @@ import (
 // container.
 type ContainerArgs struct {
 	// Don't set manually
-	Id int
+	Id   int
+	Node string
 
 	Description  string
 	Architecture int
@@ -73,7 +74,8 @@ func (c *Cluster) ContainerId(name string) (int, error) {
 }
 
 func (c *Cluster) ContainerGet(name string) (ContainerArgs, error) {
-	var used *time.Time // Hold the db-returned time
+	var used *time.Time    // Hold the db-returned time
+	var nodeAddress string // Hold the db-returned node address
 	description := sql.NullString{}
 
 	args := ContainerArgs{}
@@ -81,9 +83,14 @@ func (c *Cluster) ContainerGet(name string) (ContainerArgs, error) {
 
 	ephemInt := -1
 	statefulInt := -1
-	q := "SELECT id, description, architecture, type, ephemeral, stateful, creation_date, last_use_date FROM containers WHERE name=?"
+	q := `
+SELECT containers.id, containers.description, architecture, type, ephemeral, stateful,
+       creation_date, last_use_date, nodes.name, nodes.address
+  FROM containers JOIN nodes ON node_id = nodes.id
+  WHERE containers.name=?
+`
 	arg1 := []interface{}{name}
-	arg2 := []interface{}{&args.Id, &description, &args.Architecture, &args.Ctype, &ephemInt, &statefulInt, &args.CreationDate, &used}
+	arg2 := []interface{}{&args.Id, &description, &args.Architecture, &args.Ctype, &ephemInt, &statefulInt, &args.CreationDate, &used, &args.Node, &nodeAddress}
 	err := dbQueryRowScan(c.db, q, arg1, arg2)
 	if err != nil {
 		return args, err
@@ -132,6 +139,11 @@ func (c *Cluster) ContainerGet(name string) (ContainerArgs, error) {
 		args.Devices[k] = v
 	}
 
+	if nodeAddress == "0.0.0.0" {
+		// This means we're not clustered, so omit the node name
+		args.Node = ""
+	}
+
 	return args, nil
 }
 
diff --git a/shared/api/container.go b/shared/api/container.go
index 1ecfff755..11c9bf2f6 100644
--- a/shared/api/container.go
+++ b/shared/api/container.go
@@ -70,6 +70,9 @@ type Container struct {
 
 	// API extension: container_last_used_at
 	LastUsedAt time.Time `json:"last_used_at" yaml:"last_used_at"`
+
+	// API extension: clustering
+	Node string `json:"node" yaml:"node"`
 }
 
 // Writable converts a full Container struct into a ContainerPut struct (filters read-only fields)
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 05535f9b6..b40eceb1b 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -122,6 +122,7 @@ test_clustering_containers() {
   LXD_DIR="${LXD_TWO_DIR}" ensure_import_testimage
   LXD_DIR="${LXD_ONE_DIR}" lxc init --target node2 testimage foo
   LXD_DIR="${LXD_TWO_DIR}" lxc list | grep -q foo
+  LXD_DIR="${LXD_ONE_DIR}" lxc info foo | grep -q "Node: node2"
 
   LXD_DIR="${LXD_TWO_DIR}" lxc network delete "${bridge}"
 

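Consumers see the new field through the regular API: GetContainer returns an api.Container whose Node is empty on a standalone daemon (the nodes table keeps a sentinel 0.0.0.0 row for the local node) and carries the hosting node's name in a cluster, which is what lxc info prints. A client-side sketch:

    package main

    import (
        "fmt"

        lxd "github.com/lxc/lxd/client"
    )

    func main() {
        client, err := lxd.ConnectLXDUnix("", nil)
        if err != nil {
            panic(err)
        }
        ct, _, err := client.GetContainer("foo")
        if err != nil {
            panic(err)
        }
        // Empty when not clustered; the node name otherwise.
        if ct.Node != "" {
            fmt.Printf("Node: %s\n", ct.Node)
        }
    }
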
From afb43c58cdbe1bddf74ffa0e0b3c5941acbe9df8 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 20 Nov 2017 08:47:18 +0000
Subject: [PATCH 094/227] Add cluster.Connect convenience to connect to cluster
 nodes

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_cluster.go     |  7 +------
 lxd/cluster/connect.go | 29 +++++++++++++++++++++++++++++
 lxd/cluster/events.go  | 13 +------------
 lxd/cluster/notify.go  | 15 +--------------
 lxd/containers_post.go |  9 +--------
 lxd/operations.go      | 10 ++--------
 6 files changed, 35 insertions(+), 48 deletions(-)
 create mode 100644 lxd/cluster/connect.go

diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index 30de8bb4d..cad2a6dcf 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -362,15 +362,10 @@ func clusterNodeDelete(d *Daemon, r *http.Request) Response {
 	} else {
 		// Try to gracefully disable clustering on the target node.
 		cert := d.endpoints.NetworkCert()
-		args := &lxd.ConnectionArgs{
-			TLSServerCert: string(cert.PublicKey()),
-			TLSClientCert: string(cert.PublicKey()),
-			TLSClientKey:  string(cert.PrivateKey()),
-		}
 		run = func(op *operation) error {
 			// First request for this node to be added to the list of
 			// cluster nodes.
-			client, err := lxd.ConnectLXD(fmt.Sprintf("https://%s", address), args)
+			client, err := cluster.Connect(address, cert, false)
 			if err != nil {
 				return err
 			}
diff --git a/lxd/cluster/connect.go b/lxd/cluster/connect.go
new file mode 100644
index 000000000..163cdd7f0
--- /dev/null
+++ b/lxd/cluster/connect.go
@@ -0,0 +1,29 @@
+package cluster
+
+import (
+	"fmt"
+
+	lxd "github.com/lxc/lxd/client"
+	"github.com/lxc/lxd/shared"
+)
+
+// Connect is a convenience around lxd.ConnectLXD that configures the client
+// with the correct parameters for node-to-node communication.
+//
+// If the 'notify' flag is true, the user agent will be set to the special
+// value 'lxd-cluster-notifier', which can be used in some cases to distinguish
+// between a regular client request and an internal cluster request.
+func Connect(address string, cert *shared.CertInfo, notify bool) (lxd.ContainerServer, error) {
+	args := &lxd.ConnectionArgs{
+		TLSServerCert: string(cert.PublicKey()),
+		TLSClientCert: string(cert.PublicKey()),
+		TLSClientKey:  string(cert.PrivateKey()),
+		SkipGetServer: true,
+	}
+	if notify {
+		args.UserAgent = "lxd-cluster-notifier"
+	}
+
+	url := fmt.Sprintf("https://%s", address)
+	return lxd.ConnectLXD(url, args)
+}
diff --git a/lxd/cluster/events.go b/lxd/cluster/events.go
index fe02df4f7..9b72d1f20 100644
--- a/lxd/cluster/events.go
+++ b/lxd/cluster/events.go
@@ -1,7 +1,6 @@
 package cluster
 
 import (
-	"fmt"
 	"time"
 
 	lxd "github.com/lxc/lxd/client"
@@ -86,17 +85,7 @@ func Events(endpoints *endpoints.Endpoints, cluster *db.Cluster, f func(int64, i
 
 // Establish a client connection to get events from the given node.
 func eventsConnect(address string, cert *shared.CertInfo) (*lxd.EventListener, error) {
-	args := &lxd.ConnectionArgs{
-		TLSServerCert: string(cert.PublicKey()),
-		TLSClientCert: string(cert.PublicKey()),
-		TLSClientKey:  string(cert.PrivateKey()),
-		// Use a special user agent to let the events API handler know that
-		// it should only notify us of local events.
-		UserAgent: "lxd-cluster-notifier",
-	}
-
-	url := fmt.Sprintf("https://%s", address)
-	client, err := lxd.ConnectLXD(url, args)
+	client, err := Connect(address, cert, true)
 	if err != nil {
 		return nil, err
 	}
diff --git a/lxd/cluster/notify.go b/lxd/cluster/notify.go
index cb5a69a76..7cdbb1766 100644
--- a/lxd/cluster/notify.go
+++ b/lxd/cluster/notify.go
@@ -68,19 +68,6 @@ func NewNotifier(state *state.State, cert *shared.CertInfo, policy NotifierPolic
 		return nil, err
 	}
 
-	// Client parameters to connect to a peer cluster node.
-	args := &lxd.ConnectionArgs{
-		TLSServerCert: string(cert.PublicKey()),
-		TLSClientCert: string(cert.PublicKey()),
-		TLSClientKey:  string(cert.PrivateKey()),
-		// Use a special user agent to let the API handlers know they
-		// should not do any database work.
-		UserAgent: "lxd-cluster-notifier",
-	}
-	if cert.CA() != nil {
-		args.TLSCA = string(cert.CA().Raw)
-	}
-
 	notifier := func(hook func(lxd.ContainerServer) error) error {
 		errs := make([]error, len(peers))
 		wg := sync.WaitGroup{}
@@ -89,7 +76,7 @@ func NewNotifier(state *state.State, cert *shared.CertInfo, policy NotifierPolic
 			logger.Debugf("Notify node %s of state changes", address)
 			go func(i int, address string) {
 				defer wg.Done()
-				client, err := lxd.ConnectLXD(fmt.Sprintf("https://%s", address), args)
+				client, err := Connect(address, cert, true)
 				if err != nil {
 					errs[i] = errors.Wrapf(err, "failed to connect to peer %s", address)
 					return
diff --git a/lxd/containers_post.go b/lxd/containers_post.go
index 66053f7ef..2541edbf6 100644
--- a/lxd/containers_post.go
+++ b/lxd/containers_post.go
@@ -11,7 +11,6 @@ import (
 	"github.com/dustinkirkland/golang-petname"
 	"github.com/gorilla/websocket"
 
-	lxd "github.com/lxc/lxd/client"
 	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/types"
@@ -542,13 +541,7 @@ func containersPost(d *Daemon, r *http.Request) Response {
 		}
 		if address != "" {
 			cert := d.endpoints.NetworkCert()
-			args := &lxd.ConnectionArgs{
-				TLSServerCert: string(cert.PublicKey()),
-				TLSClientCert: string(cert.PublicKey()),
-				TLSClientKey:  string(cert.PrivateKey()),
-			}
-			url := fmt.Sprintf("https://%s", address)
-			client, err := lxd.ConnectLXD(url, args)
+			client, err := cluster.Connect(address, cert, false)
 			if err != nil {
 				return SmartError(err)
 			}
diff --git a/lxd/operations.go b/lxd/operations.go
index cdc5deb75..46ae99597 100644
--- a/lxd/operations.go
+++ b/lxd/operations.go
@@ -12,7 +12,7 @@ import (
 	"github.com/pborman/uuid"
 	"github.com/pkg/errors"
 
-	lxd "github.com/lxc/lxd/client"
+	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
@@ -483,13 +483,7 @@ func operationAPIGet(d *Daemon, r *http.Request) Response {
 			return SmartError(err)
 		}
 		cert := d.endpoints.NetworkCert()
-		args := &lxd.ConnectionArgs{
-			TLSServerCert: string(cert.PublicKey()),
-			TLSClientCert: string(cert.PublicKey()),
-			TLSClientKey:  string(cert.PrivateKey()),
-		}
-		url := fmt.Sprintf("https://%s", address)
-		client, err := lxd.ConnectLXD(url, args)
+		client, err := cluster.Connect(address, cert, false)
 		if err != nil {
 			return SmartError(err)
 		}

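cluster.Connect concentrates the node-to-node TLS setup that was previously copy-pasted in three places: the cluster certificate doubles as client and server certificate, and SkipGetServer avoids an extra round-trip on connect. A hypothetical caller:

    package sketch

    import (
        "github.com/lxc/lxd/lxd/cluster"
        "github.com/lxc/lxd/shared"
    )

    // pingPeer connects to a peer node as an internal notifier and checks
    // that it responds; purely illustrative.
    func pingPeer(address string, cert *shared.CertInfo) error {
        // true sets the lxd-cluster-notifier user agent.
        client, err := cluster.Connect(address, cert, true)
        if err != nil {
            return err
        }
        // Connect skips the initial GET /1.0, so issue one explicitly.
        _, _, err = client.GetServer()
        return err
    }
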
From 4fee940d9efeb2ccfc068d30c7b998f3bc3047cb Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 21 Nov 2017 07:57:05 +0000
Subject: [PATCH 095/227] Use unshare/nsenter instead of ip netns for isolating
 test nodes

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 test/includes/clustering.sh | 34 +++++++++++++++++++++-------------
 test/includes/lxd.sh        |  2 +-
 test/suites/clustering.sh   |  1 -
 3 files changed, 22 insertions(+), 15 deletions(-)

diff --git a/test/includes/clustering.sh b/test/includes/clustering.sh
index a87af60df..0695ba9ab 100644
--- a/test/includes/clustering.sh
+++ b/test/includes/clustering.sh
@@ -35,7 +35,10 @@ setup_clustering_netns() {
 
   echo "==> Setup clustering netns ${ns}"
 
-  ip netns add "${ns}"
+  mkdir -p /run/netns
+  touch "/run/netns/${ns}"
+
+  unshare -n sh -c "mount --bind /proc/self/ns/net /run/netns/${ns}"
 
   veth1="v${ns}1"
   veth2="v${ns}2"
@@ -43,31 +46,36 @@ setup_clustering_netns() {
   ip link add "${veth1}" type veth peer name "${veth2}"
   ip link set "${veth2}" netns "${ns}"
 
-  bridge="br$$"
-  brctl addif "${bridge}" "${veth1}"
+  nsbridge="br$$"
+  brctl addif "${nsbridge}" "${veth1}"
 
   ip link set "${veth1}" up
 
-  ip netns exec "${ns}" ip link set dev lo up
-  ip netns exec "${ns}" ip link set dev "${veth2}" name eth0
-  ip netns exec "${ns}" ip link set eth0 up
-  ip netns exec "${ns}" ip addr add "10.1.1.10${id}/16" dev eth0
-  ip netns exec "${ns}" ip route add default via 10.1.1.1
+  (
+    cat <<EOF
+    ip link set dev lo up
+    ip link set dev "${veth2}" name eth0
+    ip link set eth0 up
+    ip addr add "10.1.1.10${id}/16" dev eth0
+    ip route add default via 10.1.1.1
+EOF
+  ) | nsenter --net="/run/netns/${ns}" sh
 }
 
 teardown_clustering_netns() {
   prefix="lxd$$"
-  bridge="br$$"
+  nsbridge="br$$"
   for ns in $(ip netns | grep "${prefix}" | cut -f 1 -d " ") ; do
       echo "==> Teardown clustering netns ${ns}"
       veth1="v${ns}1"
       veth2="v${ns}2"
-      ip netns exec "${ns}" ip link set eth0 down
-      ip netns exec "${ns}" ip link set lo down
+      nsenter --net="/run/netns/${ns}" ip link set eth0 down
+      nsenter --net="/run/netns/${ns}" ip link set lo down
       ip link set "${veth1}" down
-      brctl delif "${bridge}" "${veth1}"
+      brctl delif "${nsbridge}" "${veth1}"
       ip link delete "${veth1}" type veth
-      ip netns delete "${ns}"
+      umount "/run/netns/${ns}"
+      rm "/run/netns/${ns}"
   done
 }
 
diff --git a/test/includes/lxd.sh b/test/includes/lxd.sh
index 71c0799a7..a03684701 100644
--- a/test/includes/lxd.sh
+++ b/test/includes/lxd.sh
@@ -43,7 +43,7 @@ spawn_lxd() {
     if [ "${LXD_NETNS}" = "" ]; then
 	LD_LIBRARY_PATH="${sqlite}" LXD_DIR="${lxddir}" lxd --logfile "${lxddir}/lxd.log" "${DEBUG-}" "$@" 2>&1 &
     else
-	LD_LIBRARY_PATH="${sqlite}" LXD_DIR="${lxddir}" ip netns exec "${LXD_NETNS}" lxd --logfile "${lxddir}/lxd.log" "${DEBUG-}" "$@" 2>&1 &
+	LD_LIBRARY_PATH="${sqlite}" LXD_DIR="${lxddir}" nsenter --net="/run/netns/${LXD_NETNS}" lxd --logfile "${lxddir}/lxd.log" "${DEBUG-}" "$@" 2>&1 &
     fi
     LXD_PID=$!
     echo "${LXD_PID}" > "${lxddir}/lxd.pid"
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index b40eceb1b..60039e2f4 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -132,4 +132,3 @@ test_clustering_containers() {
   rm -f "${LXD_TWO_DIR}/unix.socket"
   rm -f "${LXD_ONE_DIR}/unix.socket"
 }
-

From 20beaf5b095247076284c48756d21de7a23a1633 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Sat, 13 Jan 2018 11:22:11 +0000
Subject: [PATCH 096/227] Add cluster.ConnectIfContainerIsRemote helper

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/connect.go | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)

diff --git a/lxd/cluster/connect.go b/lxd/cluster/connect.go
index 163cdd7f0..d8f58e667 100644
--- a/lxd/cluster/connect.go
+++ b/lxd/cluster/connect.go
@@ -4,6 +4,7 @@ import (
 	"fmt"
 
 	lxd "github.com/lxc/lxd/client"
+	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/shared"
 )
 
@@ -27,3 +28,24 @@ func Connect(address string, cert *shared.CertInfo, notify bool) (lxd.ContainerS
 	url := fmt.Sprintf("https://%s", address)
 	return lxd.ConnectLXD(url, args)
 }
+
+// ConnectIfContainerIsRemote figures out the address of the node which is
+// running the container with the given name. If that's not the local node, it
+// will connect to it and return the connected client; otherwise it will just
+// return nil.
+func ConnectIfContainerIsRemote(cluster *db.Cluster, name string, cert *shared.CertInfo) (lxd.ContainerServer, error) {
+	var address string // Node address
+	err := cluster.Transaction(func(tx *db.ClusterTx) error {
+		var err error
+		address, err = tx.ContainerNodeAddress(name)
+		return err
+	})
+	if err != nil {
+		return nil, err
+	}
+	if address == "" {
+		// The container is running right on this node, no need to connect.
+		return nil, nil
+	}
+	return Connect(address, cert, false)
+}

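This helper gives handlers a uniform local-or-remote decision: a nil client means the container runs on this node, while a non-nil client is already connected to the owning node. The intended handler shape, as a hedged sketch (handleContainer and its callbacks are hypothetical):

    package sketch

    import (
        lxd "github.com/lxc/lxd/client"
        "github.com/lxc/lxd/lxd/cluster"
        "github.com/lxc/lxd/lxd/db"
        "github.com/lxc/lxd/shared"
    )

    func handleContainer(c *db.Cluster, name string, cert *shared.CertInfo,
        local func() error, remote func(lxd.ContainerServer) error) error {
        client, err := cluster.ConnectIfContainerIsRemote(c, name, cert)
        if err != nil {
            return err
        }
        if client == nil {
            return local() // The container runs on this node.
        }
        return remote(client) // Delegate to the owning node.
    }
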
From b05c0f02b9e2747557db110c0b6795a4387a2c57 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 15 Jan 2018 11:50:29 +0000
Subject: [PATCH 097/227] Add LXD client GetServerHost method returning the
 server host

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 client/interfaces.go | 1 +
 client/lxd_server.go | 5 +++++
 2 files changed, 6 insertions(+)

diff --git a/client/interfaces.go b/client/interfaces.go
index 82e3d7554..d445eeb25 100644
--- a/client/interfaces.go
+++ b/client/interfaces.go
@@ -45,6 +45,7 @@ type ContainerServer interface {
 	// Server functions
 	GetServer() (server *api.Server, ETag string, err error)
 	GetServerResources() (resources *api.Resources, err error)
+	GetServerHost() (host string, err error)
 	UpdateServer(server api.ServerPut, ETag string) (err error)
 	HasExtension(extension string) (exists bool)
 	RequireAuthenticated(authenticated bool)
diff --git a/client/lxd_server.go b/client/lxd_server.go
index 84b401deb..58ec3d304 100644
--- a/client/lxd_server.go
+++ b/client/lxd_server.go
@@ -39,6 +39,11 @@ func (r *ProtocolLXD) GetServer() (*api.Server, string, error) {
 	return &server, etag, nil
 }
 
+// GetServerHost returns the URL of the LXD host this client points to.
+func (r *ProtocolLXD) GetServerHost() (string, error) {
+	return r.httpHost, nil
+}
+
 // UpdateServer updates the server status to match the provided Server struct
 func (r *ProtocolLXD) UpdateServer(server api.ServerPut, ETag string) error {
 	// Send the request

From 67c1873a27e86aa308e7441e5ce668e6634ce778 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Sat, 13 Jan 2018 11:15:21 +0000
Subject: [PATCH 098/227] Add ForwardResponse for forwarding a request to
 another node

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/response.go | 68 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 68 insertions(+)

diff --git a/lxd/response.go b/lxd/response.go
index 9fb053412..4db6702b5 100644
--- a/lxd/response.go
+++ b/lxd/response.go
@@ -13,6 +13,8 @@ import (
 
 	"github.com/mattn/go-sqlite3"
 
+	lxd "github.com/lxc/lxd/client"
+	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
@@ -106,6 +108,72 @@ func SyncResponseHeaders(success bool, metadata interface{}, headers map[string]
 
 var EmptySyncResponse = &syncResponse{success: true, metadata: make(map[string]interface{})}
 
+type forwardedResponse struct {
+	client  lxd.ContainerServer
+	request *http.Request
+}
+
+func (r *forwardedResponse) Render(w http.ResponseWriter) error {
+	host, err := r.client.GetServerHost()
+	if err != nil {
+		return err
+	}
+
+	url := fmt.Sprintf("%s%s", host, r.request.URL.RequestURI())
+	forwarded, err := http.NewRequest(r.request.Method, url, r.request.Body)
+	if err != nil {
+		return err
+	}
+	for key := range r.request.Header {
+		forwarded.Header.Set(key, r.request.Header.Get(key))
+	}
+
+	httpClient, err := r.client.GetHTTPClient()
+	if err != nil {
+		return err
+	}
+	response, err := httpClient.Do(forwarded)
+	if err != nil {
+		return err
+	}
+
+	for key := range response.Header {
+		w.Header().Set(key, response.Header.Get(key))
+	}
+
+	w.WriteHeader(response.StatusCode)
+	_, err = io.Copy(w, response.Body)
+	return err
+}
+
+func (r *forwardedResponse) String() string {
+	return fmt.Sprintf("request to %s", r.request.URL)
+}
+
+// ForwardedResponse takes a request directed to a node and forwards it to
+// another node, writing back the response it gets.
+func ForwardedResponse(client lxd.ContainerServer, request *http.Request) Response {
+	return &forwardedResponse{
+		client:  client,
+		request: request,
+	}
+}
+
+// ForwardedResponseIfContainerIsRemote redirects a request to the node running
+// the container with the given name. If the container is local, nothing gets
+// done and nil is returned.
+func ForwardedResponseIfContainerIsRemote(d *Daemon, r *http.Request, name string) (Response, error) {
+	cert := d.endpoints.NetworkCert()
+	client, err := cluster.ConnectIfContainerIsRemote(d.cluster, name, cert)
+	if err != nil {
+		return nil, err
+	}
+	if client == nil {
+		return nil, nil
+	}
+	return ForwardedResponse(client, r), nil
+}
+
 // File transfer response
 type fileResponseEntry struct {
 	identifier string

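forwardedResponse is essentially a one-shot reverse proxy: it rebuilds the request against the target's base URL (obtained via GetServerHost), copies headers in both directions and streams the body back. The standard library's httputil.ReverseProxy covers the same ground; the hand-rolled version keeps the proxying on the already-configured cluster client. For comparison only, a minimal sketch with the standard library (the transport would come from the cluster client's TLS setup):

    package sketch

    import (
        "net/http"
        "net/http/httputil"
        "net/url"
    )

    // proxyTo forwards the request to the node at target, for example
    // "https://10.1.1.102:8443", using a TLS-configured transport.
    func proxyTo(w http.ResponseWriter, r *http.Request, target string,
        transport http.RoundTripper) error {
        u, err := url.Parse(target)
        if err != nil {
            return err
        }
        proxy := httputil.NewSingleHostReverseProxy(u)
        proxy.Transport = transport
        proxy.ServeHTTP(w, r)
        return nil
    }
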
From 4d263dfcc29a2acad979f59b2145febd755b2a8b Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 21 Nov 2017 08:54:41 +0000
Subject: [PATCH 099/227] Make it possible to start a container on a different
 node

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/container_state.go    |  9 +++++++++
 lxd/db/containers.go      | 43 +++++++++++++++++++++++++++++++++++++++++++
 test/suites/clustering.sh |  4 ++++
 3 files changed, 56 insertions(+)

diff --git a/lxd/container_state.go b/lxd/container_state.go
index 306fbca74..62e328b15 100644
--- a/lxd/container_state.go
+++ b/lxd/container_state.go
@@ -31,6 +31,15 @@ func containerState(d *Daemon, r *http.Request) Response {
 func containerStatePut(d *Daemon, r *http.Request) Response {
 	name := mux.Vars(r)["name"]
 
+	// Handle requests targeted to a container on a different node
+	response, err := ForwardedResponseIfContainerIsRemote(d, r, name)
+	if err != nil {
+		return SmartError(err)
+	}
+	if response != nil {
+		return response
+	}
+
 	raw := api.ContainerStatePut{}
 
 	// We default to -1 (i.e. no timeout) here instead of 0 (instant
diff --git a/lxd/db/containers.go b/lxd/db/containers.go
index c2ef39d0b..994b187d3 100644
--- a/lxd/db/containers.go
+++ b/lxd/db/containers.go
@@ -41,6 +41,49 @@ const (
 	CTypeSnapshot ContainerType = 1
 )
 
+// ContainerNodeAddress returns the address of the node hosting the container
+// with the given name.
+//
+// It returns the empty string if the container is hosted on this node.
+func (c *ClusterTx) ContainerNodeAddress(name string) (string, error) {
+	stmt := `
+SELECT nodes.id, nodes.address
+  FROM nodes JOIN containers ON containers.node_id = nodes.id
+    WHERE containers.name = ?
+`
+	var address string
+	var id int64
+	rows, err := c.tx.Query(stmt, name)
+	if err != nil {
+		return "", err
+	}
+	defer rows.Close()
+
+	if !rows.Next() {
+		return "", NoSuchObjectError
+	}
+
+	err = rows.Scan(&id, &address)
+	if err != nil {
+		return "", err
+	}
+
+	if rows.Next() {
+		return "", fmt.Errorf("more than one node associated with container")
+	}
+
+	err = rows.Err()
+	if err != nil {
+		return "", err
+	}
+
+	if id == c.nodeID {
+		return "", nil
+	}
+
+	return address, nil
+}
+
 func (c *Cluster) ContainerRemove(name string) error {
 	id, err := c.ContainerId(name)
 	if err != nil {
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 60039e2f4..a0dd6a27f 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -124,6 +124,10 @@ test_clustering_containers() {
   LXD_DIR="${LXD_TWO_DIR}" lxc list | grep -q foo
   LXD_DIR="${LXD_ONE_DIR}" lxc info foo | grep -q "Node: node2"
 
+  LXD_DIR="${LXD_ONE_DIR}" lxc start foo
+  LXD_DIR="${LXD_TWO_DIR}" lxc info foo | grep -q "Status: Running"
+  LXD_DIR="${LXD_ONE_DIR}" lxc stop foo
+
   LXD_DIR="${LXD_TWO_DIR}" lxc network delete "${bridge}"
 
   LXD_DIR="${LXD_TWO_DIR}" lxd shutdown

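ContainerNodeAddress has three outcomes worth spelling out: an empty string with a nil error for a container on this node, the peer's "host:port" address for a remote one, and NoSuchObjectError when no row matches (two matches are treated as corruption). A small illustrative wrapper:

    package sketch

    import "github.com/lxc/lxd/lxd/db"

    // whereIs resolves a container name to an address to forward to; ""
    // means the container is local and should be handled on this node.
    func whereIs(tx *db.ClusterTx, name string) (string, error) {
        address, err := tx.ContainerNodeAddress(name)
        if err != nil {
            // db.NoSuchObjectError: no such container anywhere in the cluster.
            return "", err
        }
        // "" for local; otherwise ready to be passed to cluster.Connect.
        return address, nil
    }
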
From 03c5fbc2273410ec8a7e98d2fa930afbc2d007aa Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 21 Nov 2017 11:22:13 +0000
Subject: [PATCH 100/227] Add "lxc cluster rename" command and related API

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 client/interfaces.go      |  1 +
 client/lxd_cluster.go     |  7 +++++++
 doc/rest-api.md           | 14 ++++++++++++++
 lxc/cluster.go            | 44 +++++++++++++++++++++++++++++++++++---------
 lxd/api_cluster.go        | 25 ++++++++++++++++++++++++-
 lxd/api_cluster_test.go   | 23 +++++++++++++++++++++++
 lxd/db/node.go            | 26 ++++++++++++++++++++++++++
 lxd/db/node_test.go       | 19 +++++++++++++++++++
 shared/api/cluster.go     |  9 +++++++++
 test/suites/clustering.sh |  5 ++++-
 10 files changed, 162 insertions(+), 11 deletions(-)

diff --git a/client/interfaces.go b/client/interfaces.go
index d445eeb25..4becddb88 100644
--- a/client/interfaces.go
+++ b/client/interfaces.go
@@ -171,6 +171,7 @@ type ContainerServer interface {
 	LeaveCluster(name string, force bool) (err error)
 	GetNodes() (nodes []api.Node, err error)
 	GetNode(name string) (node *api.Node, err error)
+	RenameNode(name string, node api.NodePost) (err error)
 
 	// Internal functions (for internal use)
 	RawQuery(method string, path string, data interface{}, queryETag string) (resp *api.Response, ETag string, err error)
diff --git a/client/lxd_cluster.go b/client/lxd_cluster.go
index 3c12da1d0..93e9a6d6b 100644
--- a/client/lxd_cluster.go
+++ b/client/lxd_cluster.go
@@ -107,3 +107,10 @@ func (r *ProtocolLXD) GetNode(name string) (*api.Node, error) {
 
 	return &node, nil
 }
+
+// RenameNode changes the name of an existing node
+func (r *ProtocolLXD) RenameNode(name string, node api.NodePost) error {
+	url := fmt.Sprintf("/cluster/nodes/%s", name)
+	_, _, err := r.query("POST", url, node, "")
+	return err
+}
diff --git a/doc/rest-api.md b/doc/rest-api.md
index 3df4dd58b..1ff691a3c 100644
--- a/doc/rest-api.md
+++ b/doc/rest-api.md
@@ -2618,6 +2618,20 @@ of the cluster certificate:
         }
     }
 
+## `/1.0/cluster/nodes/<name>`
+### POST
+ * Description: rename a cluster node
+ * Introduced: with API extension `clustering`
+ * Authentication: trusted
+ * Operation: sync
+ * Return: standard return value or standard error
+
+Input:
+
+    {
+        "name": "node1"
+    }
+
 ### DELETE (optional `?force=1`)
  * Description: remove a node from the cluster
  * Introduced: with API extension `clustering`
diff --git a/lxc/cluster.go b/lxc/cluster.go
index 041bb7c7f..02e5d833c 100644
--- a/lxc/cluster.go
+++ b/lxc/cluster.go
@@ -8,6 +8,7 @@ import (
 	yaml "gopkg.in/yaml.v2"
 
 	"github.com/lxc/lxd/lxc/config"
+	"github.com/lxc/lxd/shared/api"
 	"github.com/lxc/lxd/shared/gnuflag"
 	"github.com/lxc/lxd/shared/i18n"
 	"github.com/olekukonko/tablewriter"
@@ -29,6 +30,9 @@ lxc cluster list [<remote>:]
 lxc cluster show [<remote>:]<node>
     Show details of a node.
 
+lxc cluster rename [<remote>:]<node> <new-name>
+    Rename a cluster node.
+
 lxc cluster delete [<remote>:]<node> [--force]
     Delete a node from the cluster.`)
 }
@@ -46,15 +50,14 @@ func (c *clusterCmd) run(conf *config.Config, args []string) error {
 		return errUsage
 	}
 
-	if args[0] == "list" {
+	switch args[0] {
+	case "list":
 		return c.doClusterList(conf, args)
-	}
-
-	if args[0] == "show" {
+	case "show":
 		return c.doClusterNodeShow(conf, args)
-	}
-
-	if args[0] == "delete" {
+	case "rename":
+		return c.doClusterNodeRename(conf, args)
+	case "delete":
 		return c.doClusterNodeDelete(conf, args)
 	}
 
@@ -66,7 +69,6 @@ func (c *clusterCmd) doClusterNodeShow(conf *config.Config, args []string) error
 		return errArgs
 	}
 
-	// [[lxc cluster]] remove production:bionic-1
 	remote, name, err := conf.ParseRemote(args[1])
 	if err != nil {
 		return err
@@ -92,12 +94,36 @@ func (c *clusterCmd) doClusterNodeShow(conf *config.Config, args []string) error
 	return nil
 }
 
+func (c *clusterCmd) doClusterNodeRename(conf *config.Config, args []string) error {
+	if len(args) < 3 {
+		return errArgs
+	}
+	newName := args[2]
+
+	remote, name, err := conf.ParseRemote(args[1])
+	if err != nil {
+		return err
+	}
+
+	client, err := conf.GetContainerServer(remote)
+	if err != nil {
+		return err
+	}
+
+	err = client.RenameNode(name, api.NodePost{Name: newName})
+	if err != nil {
+		return err
+	}
+
+	fmt.Printf(i18n.G("Node %s renamed to %s")+"\n", name, newName)
+	return nil
+}
+
 func (c *clusterCmd) doClusterNodeDelete(conf *config.Config, args []string) error {
 	if len(args) < 2 {
 		return errArgs
 	}
 
-	// [[lxc cluster]] remove production:bionic-1
 	remote, name, err := conf.ParseRemote(args[1])
 	if err != nil {
 		return err
diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index cad2a6dcf..1b1ca19ce 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -299,12 +299,14 @@ func clusterNodesGet(d *Daemon, r *http.Request) Response {
 var clusterNodeCmd = Command{
 	name:   "cluster/nodes/{name}",
 	get:    clusterNodeGet,
+	post:   clusterNodePost,
 	delete: clusterNodeDelete,
 }
 
 func clusterNodeGet(d *Daemon, r *http.Request) Response {
 	name := mux.Vars(r)["name"]
-	node := api.Node{Name: name}
+	node := api.Node{}
+	node.Name = name
 	address := ""
 	err := d.cluster.Transaction(func(tx *db.ClusterTx) error {
 		dbNode, err := tx.NodeByName(name)
@@ -339,6 +341,27 @@ func clusterNodeGet(d *Daemon, r *http.Request) Response {
 	return SyncResponse(true, node)
 }
 
+func clusterNodePost(d *Daemon, r *http.Request) Response {
+	name := mux.Vars(r)["name"]
+
+	req := api.NodePost{}
+
+	// Parse the request
+	err := json.NewDecoder(r.Body).Decode(&req)
+	if err != nil {
+		return BadRequest(err)
+	}
+
+	err = d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		return tx.NodeRename(name, req.Name)
+	})
+	if err != nil {
+		return SmartError(err)
+	}
+
+	return EmptySyncResponse
+}
+
 func clusterNodeDelete(d *Daemon, r *http.Request) Response {
 	force, err := strconv.Atoi(r.FormValue("force"))
 	if err != nil {
diff --git a/lxd/api_cluster_test.go b/lxd/api_cluster_test.go
index a6d18e7cf..c77cc6ca7 100644
--- a/lxd/api_cluster_test.go
+++ b/lxd/api_cluster_test.go
@@ -7,6 +7,7 @@ import (
 	lxd "github.com/lxc/lxd/client"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/api"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
@@ -166,6 +167,28 @@ func TestCluster_Leave(t *testing.T) {
 	require.NoError(t, err)
 }
 
+// A LXD node can be renamed.
+func TestCluster_NodeRename(t *testing.T) {
+	daemon, cleanup := newDaemon(t)
+	defer cleanup()
+
+	f := clusterFixture{t: t}
+	f.EnableNetworking(daemon, "")
+
+	client := f.ClientUnix(daemon)
+
+	op, err := client.BootstrapCluster("buzz")
+	require.NoError(t, err)
+	require.NoError(t, op.Wait())
+
+	node := api.NodePost{Name: "rusp"}
+	err = client.RenameNode("buzz", node)
+	require.NoError(t, err)
+
+	_, err = client.GetNode("rusp")
+	require.NoError(t, err)
+}
+
 // Test helper for cluster-related APIs.
 type clusterFixture struct {
 	t       *testing.T
diff --git a/lxd/db/node.go b/lxd/db/node.go
index 4b42ec1cc..01d151f5d 100644
--- a/lxd/db/node.go
+++ b/lxd/db/node.go
@@ -69,6 +69,32 @@ func (c *ClusterTx) Nodes() ([]NodeInfo, error) {
 	return c.nodes("")
 }
 
+// NodeRename changes the name of an existing node.
+//
+// An error is returned if a node with the same name already exists.
+func (c *ClusterTx) NodeRename(old, new string) error {
+	count, err := query.Count(c.tx, "nodes", "name=?", new)
+	if err != nil {
+		return errors.Wrap(err, "failed to check existing nodes")
+	}
+	if count != 0 {
+		return DbErrAlreadyDefined
+	}
+	stmt := `UPDATE nodes SET name=? WHERE name=?`
+	result, err := c.tx.Exec(stmt, new, old)
+	if err != nil {
+		return errors.Wrap(err, "failed to update node name")
+	}
+	n, err := result.RowsAffected()
+	if err != nil {
+		return errors.Wrap(err, "failed to get rows count")
+	}
+	if n != 1 {
+		return fmt.Errorf("expected to update one row, not %d", n)
+	}
+	return nil
+}
+
 // nodes returns all LXD nodes that are part of the cluster.
 func (c *ClusterTx) nodes(where string, args ...interface{}) ([]NodeInfo, error) {
 	nodes := []NodeInfo{}
diff --git a/lxd/db/node_test.go b/lxd/db/node_test.go
index 22ee430d9..84a6bceff 100644
--- a/lxd/db/node_test.go
+++ b/lxd/db/node_test.go
@@ -37,6 +37,25 @@ func TestNodeAdd(t *testing.T) {
 	assert.Equal(t, "buzz", node.Name)
 }
 
+// Rename a node.
+func TestNodeRename(t *testing.T) {
+	tx, cleanup := db.NewTestClusterTx(t)
+	defer cleanup()
+
+	_, err := tx.NodeAdd("buzz", "1.2.3.4:666")
+	require.NoError(t, err)
+	err = tx.NodeRename("buzz", "rusp")
+	require.NoError(t, err)
+	node, err := tx.NodeByName("rusp")
+	require.NoError(t, err)
+	assert.Equal(t, "rusp", node.Name)
+
+	_, err = tx.NodeAdd("buzz", "5.6.7.8:666")
+	require.NoError(t, err)
+	err = tx.NodeRename("rusp", "buzz")
+	assert.Equal(t, db.DbErrAlreadyDefined, err)
+}
+
 // Remove a node.
 func TestNodeRemove(t *testing.T) {
 	tx, cleanup := db.NewTestClusterTx(t)
diff --git a/shared/api/cluster.go b/shared/api/cluster.go
index 5b653e869..1320a1ff4 100644
--- a/shared/api/cluster.go
+++ b/shared/api/cluster.go
@@ -37,7 +37,16 @@ type RaftNode struct {
 	Address string `json:"address" yaml:"address"`
 }
 
+// NodePost represents the fields required to rename a LXD node.
+//
+// API extension: clustering
+type NodePost struct {
+	Name string `json:"name" yaml:"name"`
+}
+
 // Node represents a LXD node in the cluster.
+//
+// API extension: clustering
 type Node struct {
 	Name     string `json:"name" yaml:"name"`
 	URL      string `json:"url" yaml:"url"`
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index a0dd6a27f..cdbec3671 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -82,8 +82,11 @@ test_clustering() {
   # notified.
   LXD_DIR="${LXD_TWO_DIR}" lxc network delete "${bridge}"
 
+  # Rename a node, using a client connected to another node.
+  LXD_DIR="${LXD_THREE_DIR}" lxc cluster rename node4 node5
+
   # Remove a node gracefully.
-  LXD_DIR="${LXD_FOUR_DIR}" lxc cluster delete node4
+  LXD_DIR="${LXD_FOUR_DIR}" lxc cluster delete node5
 
   LXD_DIR="${LXD_FOUR_DIR}" lxd shutdown
   LXD_DIR="${LXD_THREE_DIR}" lxd shutdown

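For reference, the wire format behind the new rename command is just a one-field JSON body POSTed to /1.0/cluster/nodes/<name>. Below is a minimal standalone sketch of such a request; the plain-HTTP localhost URL is an illustrative assumption only (a real deployment serves this over TLS with trusted client certificates, or over the local unix socket).

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// nodePost mirrors shared/api.NodePost: the rename body carries one field.
type nodePost struct {
	Name string `json:"name"`
}

func main() {
	// Rename node "buzz" to "rusp" (names borrowed from the tests above).
	body, err := json.Marshal(nodePost{Name: "rusp"})
	if err != nil {
		fmt.Println("encode failed:", err)
		return
	}

	// Assumption: a daemon reachable over plain HTTP on localhost.
	url := "http://localhost:8443/1.0/cluster/nodes/buzz"
	resp, err := http.Post(url, "application/json", bytes.NewReader(body))
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()

	// On success the server replies with an empty sync response.
	fmt.Println("status:", resp.Status)
}
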
From ec216de730d9ce919472a2fb864dfb03feb19c56 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 22 Nov 2017 08:44:42 +0000
Subject: [PATCH 101/227] Make GET /1.0/containers fetch container status from
 cluster nodes

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/containers_get.go     | 118 +++++++++++++++++++++++++++++++++++++++-------
 lxd/db/containers.go      |  45 ++++++++++++++++++
 lxd/db/containers_test.go |  48 +++++++++++++++++++
 lxd/db/node.go            |   6 ++-
 test/suites/clustering.sh |  25 +++++++++-
 5 files changed, 221 insertions(+), 21 deletions(-)
 create mode 100644 lxd/db/containers_test.go

diff --git a/lxd/containers_get.go b/lxd/containers_get.go
index 376190b86..425ef8590 100644
--- a/lxd/containers_get.go
+++ b/lxd/containers_get.go
@@ -3,19 +3,24 @@ package main
 import (
 	"fmt"
 	"net/http"
+	"sort"
+	"sync"
 	"time"
 
+	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/state"
 	"github.com/lxc/lxd/lxd/util"
+	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
 	"github.com/lxc/lxd/shared/logger"
 	"github.com/lxc/lxd/shared/version"
+	"github.com/pkg/errors"
 )
 
 func containersGet(d *Daemon, r *http.Request) Response {
 	for i := 0; i < 100; i++ {
-		result, err := doContainersGet(d.State(), util.IsRecursionRequest(r))
+		result, err := doContainersGet(d, r)
 		if err == nil {
 			return SyncResponse(true, result)
 		}
@@ -33,38 +38,80 @@ func containersGet(d *Daemon, r *http.Request) Response {
 	return InternalError(fmt.Errorf("DB is locked"))
 }
 
-func doContainersGet(s *state.State, recursion bool) (interface{}, error) {
-	result, err := s.Cluster.ContainersList(db.CTypeRegular)
+func doContainersGet(d *Daemon, r *http.Request) (interface{}, error) {
+	var result map[string][]string
+	err := d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		var err error
+		result, err = tx.ContainersListByNodeAddress()
+		return err
+	})
 	if err != nil {
-		return nil, err
+		return []string{}, err
 	}
 
+	recursion := util.IsRecursionRequest(r)
 	resultString := []string{}
 	resultList := []*api.Container{}
-	if err != nil {
-		return []string{}, err
+	resultMu := sync.Mutex{}
+
+	resultAppend := func(name string, c *api.Container, err error) {
+		if err != nil {
+			c = &api.Container{
+				Name:       name,
+				Status:     api.Error.String(),
+				StatusCode: api.Error}
+		}
+		resultMu.Lock()
+		resultList = append(resultList, c)
+		resultMu.Unlock()
 	}
 
-	for _, container := range result {
-		if !recursion {
-			url := fmt.Sprintf("/%s/containers/%s", version.APIVersion, container)
-			resultString = append(resultString, url)
-		} else {
-			c, err := doContainerGet(s, container)
-			if err != nil {
-				c = &api.Container{
-					Name:       container,
-					Status:     api.Error.String(),
-					StatusCode: api.Error}
+	wg := sync.WaitGroup{}
+	for address, containers := range result {
+		// Mark containers on unavailable nodes as down
+		if recursion && address == "0.0.0.0" {
+			for _, container := range containers {
+				resultAppend(container, nil, fmt.Errorf("unavailable"))
 			}
-			resultList = append(resultList, c)
+		}
+
+		// For recursion requests we need to fetch the state of remote
+		// containers from their respective nodes.
+		if recursion && address != "" && !isClusterNotification(r) {
+			wg.Add(1)
+			go func(address string) {
+				cert := d.endpoints.NetworkCert()
+				cs, err := doContainersGetFromNode(address, cert)
+				for i := range cs {
+					resultAppend(cs[i].Name, &cs[i], err)
+				}
+				wg.Done()
+			}(address)
+			continue
+		}
+
+		for _, container := range containers {
+			if !recursion {
+				url := fmt.Sprintf("/%s/containers/%s", version.APIVersion, container)
+				resultString = append(resultString, url)
+				continue
+			}
+
+			c, err := doContainerGet(d.State(), container)
+			resultAppend(container, c, err)
 		}
 	}
+	wg.Wait()
 
 	if !recursion {
 		return resultString, nil
 	}
 
+	// Sort the result list by name.
+	sort.Slice(resultList, func(i, j int) bool {
+		return resultList[i].Name < resultList[j].Name
+	})
+
 	return resultList, nil
 }
 
@@ -81,3 +128,38 @@ func doContainerGet(s *state.State, cname string) (*api.Container, error) {
 
 	return cts.(*api.Container), nil
 }
+
+// Fetch information about the containers on the given remote node, using the
+// REST API and with a timeout of 30 seconds.
+func doContainersGetFromNode(node string, cert *shared.CertInfo) ([]api.Container, error) {
+	f := func() ([]api.Container, error) {
+		client, err := cluster.Connect(node, cert, true)
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to connect to node %s", node)
+		}
+		containers, err := client.GetContainers()
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to get containers from node %s", node)
+		}
+		return containers, nil
+	}
+
+	timeout := time.After(30 * time.Second)
+	done := make(chan struct{}, 1) // buffered: the sender must not block if we time out first
+
+	var containers []api.Container
+	var err error
+
+	go func() {
+		containers, err = f()
+		done <- struct{}{}
+	}()
+
+	select {
+	case <-timeout:
+		return nil, fmt.Errorf("timeout getting containers from node %s", node)
+	case <-done:
+	}
+
+	return containers, err
+}
diff --git a/lxd/db/containers.go b/lxd/db/containers.go
index 994b187d3..a2ddcbc58 100644
--- a/lxd/db/containers.go
+++ b/lxd/db/containers.go
@@ -84,6 +84,51 @@ SELECT nodes.id, nodes.address
 	return address, nil
 }
 
+// ContainersListByNodeAddress returns the names of all containers grouped by
+// cluster node address.
+//
+// The node address of containers running on the local node is set to the empty
+// string, to distinguish them from containers on remote nodes.
+//
+// Containers whose node is down are added under the special address "0.0.0.0".
+func (c *ClusterTx) ContainersListByNodeAddress() (map[string][]string, error) {
+	stmt := `
+SELECT containers.name, nodes.id, nodes.address, nodes.heartbeat
+  FROM containers JOIN nodes ON nodes.id = containers.node_id
+  WHERE containers.type=?
+  ORDER BY containers.id
+`
+	rows, err := c.tx.Query(stmt, CTypeRegular)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	result := map[string][]string{}
+
+	for i := 0; rows.Next(); i++ {
+		var name, nodeAddress string
+		var nodeID int64
+		var nodeHeartbeat time.Time
+		err := rows.Scan(&name, &nodeID, &nodeAddress, &nodeHeartbeat)
+		if err != nil {
+			return nil, err
+		}
+		if nodeID == c.nodeID {
+			nodeAddress = ""
+		} else if nodeIsDown(nodeHeartbeat) {
+			nodeAddress = "0.0.0.0"
+		}
+		result[nodeAddress] = append(result[nodeAddress], name)
+	}
+
+	err = rows.Err()
+	if err != nil {
+		return nil, err
+	}
+	return result, nil
+}
+
 func (c *Cluster) ContainerRemove(name string) error {
 	id, err := c.ContainerId(name)
 	if err != nil {
diff --git a/lxd/db/containers_test.go b/lxd/db/containers_test.go
new file mode 100644
index 000000000..a5a68c444
--- /dev/null
+++ b/lxd/db/containers_test.go
@@ -0,0 +1,48 @@
+package db_test
+
+import (
+	"testing"
+	"time"
+
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// Containers are grouped by node address.
+func TestContainersListByNodeAddress(t *testing.T) {
+	tx, cleanup := db.NewTestClusterTx(t)
+	defer cleanup()
+
+	nodeID1 := int64(1) // This is the default local node
+
+	nodeID2, err := tx.NodeAdd("node2", "1.2.3.4:666")
+	require.NoError(t, err)
+
+	nodeID3, err := tx.NodeAdd("node3", "5.6.7.8:666")
+	require.NoError(t, err)
+	require.NoError(t, tx.NodeHeartbeat("5.6.7.8:666", time.Now().Add(-time.Minute)))
+
+	addContainer(t, tx, nodeID2, "c1")
+	addContainer(t, tx, nodeID1, "c2")
+	addContainer(t, tx, nodeID3, "c3")
+	addContainer(t, tx, nodeID2, "c4")
+
+	result, err := tx.ContainersListByNodeAddress()
+	require.NoError(t, err)
+	assert.Equal(
+		t,
+		map[string][]string{
+			"":            {"c2"},
+			"1.2.3.4:666": {"c1", "c4"},
+			"0.0.0.0":     {"c3"},
+		}, result)
+}
+
+func addContainer(t *testing.T, tx *db.ClusterTx, nodeID int64, name string) {
+	stmt := `
+INSERT INTO containers(node_id, name, architecture, type) VALUES (?, ?, 1, ?)
+`
+	_, err := tx.Tx().Exec(stmt, nodeID, name, db.CTypeRegular)
+	require.NoError(t, err)
+}
diff --git a/lxd/db/node.go b/lxd/db/node.go
index 01d151f5d..2982bd884 100644
--- a/lxd/db/node.go
+++ b/lxd/db/node.go
@@ -24,7 +24,7 @@ type NodeInfo struct {
 // IsDown returns true if the last heartbeat time of the node is older than 20
 // seconds.
 func (n NodeInfo) IsDown() bool {
-	return n.Heartbeat.Before(time.Now().Add(-20 * time.Second))
+	return nodeIsDown(n.Heartbeat)
 }
 
 // NodeByAddress returns the node with the given network address.
@@ -216,3 +216,7 @@ func (c *ClusterTx) NodeClear(id int64) error {
 
 	return nil
 }
+
+func nodeIsDown(heartbeat time.Time) bool {
+	return heartbeat.Before(time.Now().Add(-20 * time.Second))
+}
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index cdbec3671..bab1576d2 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -121,21 +121,42 @@ test_clustering_containers() {
   ns2="${prefix}2"
   spawn_lxd_and_join_cluster "${ns2}" "${bridge}" "${cert}" 2 1 "${LXD_TWO_DIR}"
 
-  # Init a container on a node2, using a client connected to node1
+  # Spawn a third node
+  setup_clustering_netns 3
+  LXD_THREE_DIR=$(mktemp -d -p "${TEST_DIR}" XXX)
+  chmod +x "${LXD_THREE_DIR}"
+  ns3="${prefix}3"
+  spawn_lxd_and_join_cluster "${ns3}" "${bridge}" "${cert}" 3 1 "${LXD_THREE_DIR}"
+
+  # Init a container on node2, using a client connected to node1
   LXD_DIR="${LXD_TWO_DIR}" ensure_import_testimage
   LXD_DIR="${LXD_ONE_DIR}" lxc init --target node2 testimage foo
-  LXD_DIR="${LXD_TWO_DIR}" lxc list | grep -q foo
+
+  # The container is visible through both nodes
+  LXD_DIR="${LXD_ONE_DIR}" lxc list | grep foo | grep -q STOPPED
+  LXD_DIR="${LXD_TWO_DIR}" lxc list | grep foo | grep -q STOPPED
+
+  # A Node: field indicates on which node the container is running
   LXD_DIR="${LXD_ONE_DIR}" lxc info foo | grep -q "Node: node2"
 
+  # Start and stop the container via node1
   LXD_DIR="${LXD_ONE_DIR}" lxc start foo
   LXD_DIR="${LXD_TWO_DIR}" lxc info foo | grep -q "Status: Running"
+  LXD_DIR="${LXD_ONE_DIR}" lxc list | grep foo | grep -q RUNNING
   LXD_DIR="${LXD_ONE_DIR}" lxc stop foo
 
   LXD_DIR="${LXD_TWO_DIR}" lxc network delete "${bridge}"
 
+  # Shutdown node 2, wait for it to be considered offline, and list
+  # containers.
   LXD_DIR="${LXD_TWO_DIR}" lxd shutdown
+  sleep 22
+  LXD_DIR="${LXD_ONE_DIR}" lxc list | grep foo | grep -q ERROR
+
+  LXD_DIR="${LXD_THREE_DIR}" lxd shutdown
   LXD_DIR="${LXD_ONE_DIR}" lxd shutdown
   sleep 2
+  rm -f "${LXD_THREE_DIR}/unix.socket"
   rm -f "${LXD_TWO_DIR}/unix.socket"
   rm -f "${LXD_ONE_DIR}/unix.socket"
 }

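The per-node fetch above wraps a blocking call in a goroutine and races it against a timer. Here is a self-contained sketch of the same pattern (callWithTimeout is a made-up name for illustration). It closes the done channel instead of sending on it, and the timeout branch returns without reading the worker's result variables, so an abandoned worker can neither block forever nor race with the caller:

package main

import (
	"fmt"
	"time"
)

// callWithTimeout runs fn in a goroutine and gives up after the timeout,
// mirroring the shape of doContainersGetFromNode above.
func callWithTimeout(fn func() (string, error), timeout time.Duration) (string, error) {
	done := make(chan struct{})
	var result string
	var err error

	go func() {
		result, err = fn()
		close(done) // a close never blocks, even with no receiver left
	}()

	select {
	case <-time.After(timeout):
		// Return without touching result/err: the goroutine may still
		// be writing them, and reading here would be a data race.
		return "", fmt.Errorf("timeout after %s", timeout)
	case <-done:
		return result, err
	}
}

func main() {
	slow := func() (string, error) {
		time.Sleep(2 * time.Second)
		return "containers from node", nil
	}
	_, err := callWithTimeout(slow, time.Second)
	fmt.Println(err) // timeout after 1s
}
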
From 7a25a2a4090bb2187e726c6775a1c263d34d61d8 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 23 Nov 2017 07:30:21 +0000
Subject: [PATCH 102/227] Add Clustered field to api.ServerEnvironment

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 client/interfaces.go           |  1 +
 client/lxd_server.go           |  5 +++++
 lxd/api_1.0.go                 | 16 +++++++++++++++-
 lxd/api_cluster_test.go        |  4 ++++
 lxd/daemon_integration_test.go |  2 ++
 shared/api/server.go           |  3 +++
 6 files changed, 30 insertions(+), 1 deletion(-)

diff --git a/client/interfaces.go b/client/interfaces.go
index 4becddb88..5e33e0531 100644
--- a/client/interfaces.go
+++ b/client/interfaces.go
@@ -49,6 +49,7 @@ type ContainerServer interface {
 	UpdateServer(server api.ServerPut, ETag string) (err error)
 	HasExtension(extension string) (exists bool)
 	RequireAuthenticated(authenticated bool)
+	IsClustered() (clustered bool)
 	ClusterTargetNode(name string) ContainerServer
 
 	// Certificate functions
diff --git a/client/lxd_server.go b/client/lxd_server.go
index 58ec3d304..ec5f09857 100644
--- a/client/lxd_server.go
+++ b/client/lxd_server.go
@@ -66,6 +66,11 @@ func (r *ProtocolLXD) HasExtension(extension string) bool {
 	return false
 }
 
+// IsClustered returns true if the server is part of a LXD cluster.
+func (r *ProtocolLXD) IsClustered() bool {
+	return r.server.Environment.Clustered
+}
+
 // GetServerResources returns the resources available to a given LXD server
 func (r *ProtocolLXD) GetServerResources() (*api.Resources, error) {
 	if !r.HasExtension("resources") {
diff --git a/lxd/api_1.0.go b/lxd/api_1.0.go
index 9c986e153..5926e6d40 100644
--- a/lxd/api_1.0.go
+++ b/lxd/api_1.0.go
@@ -109,6 +109,19 @@ func api10Get(d *Daemon, r *http.Request) Response {
 		return InternalError(err)
 	}
 
+	clustered := false
+	err = d.db.Transaction(func(tx *db.NodeTx) error {
+		addresses, err := tx.RaftNodeAddresses()
+		if err != nil {
+			return err
+		}
+		clustered = len(addresses) > 0
+		return nil
+	})
+	if err != nil {
+		return SmartError(err)
+	}
+
 	certificate := string(d.endpoints.NetworkPublicKey())
 	var certificateFingerprint string
 	if certificate != "" {
@@ -140,7 +153,8 @@ func api10Get(d *Daemon, r *http.Request) Response {
 		KernelVersion:          uname.Release,
 		Server:                 "lxd",
 		ServerPid:              os.Getpid(),
-		ServerVersion:          version.Version}
+		ServerVersion:          version.Version,
+		Clustered:              clustered}
 
 	drivers := readStoragePoolDriversCache()
 	for driver, version := range drivers {
diff --git a/lxd/api_cluster_test.go b/lxd/api_cluster_test.go
index c77cc6ca7..e4f187478 100644
--- a/lxd/api_cluster_test.go
+++ b/lxd/api_cluster_test.go
@@ -26,6 +26,10 @@ func TestCluster_Bootstrap(t *testing.T) {
 	op, err := client.BootstrapCluster("buzz")
 	require.NoError(t, err)
 	require.NoError(t, op.Wait())
+
+	_, _, err = client.GetServer()
+	require.NoError(t, err)
+	assert.True(t, client.IsClustered())
 }
 
 // A LXD node which is already configured for networking can join an existing
diff --git a/lxd/daemon_integration_test.go b/lxd/daemon_integration_test.go
index 2012dc657..0cdf6a06d 100644
--- a/lxd/daemon_integration_test.go
+++ b/lxd/daemon_integration_test.go
@@ -20,6 +20,8 @@ func TestIntegration_UnixSocket(t *testing.T) {
 	server, _, err := client.GetServer()
 	require.NoError(t, err)
 	assert.Equal(t, "trusted", server.Auth)
+	assert.False(t, server.Environment.Clustered)
+	assert.False(t, client.IsClustered())
 }
 
 // Create a new daemon for testing.
diff --git a/shared/api/server.go b/shared/api/server.go
index c89426d59..570041533 100644
--- a/shared/api/server.go
+++ b/shared/api/server.go
@@ -16,6 +16,9 @@ type ServerEnvironment struct {
 	ServerVersion          string   `json:"server_version" yaml:"server_version"`
 	Storage                string   `json:"storage" yaml:"storage"`
 	StorageVersion         string   `json:"storage_version" yaml:"storage_version"`
+
+	// API extension: clustering
+	Clustered bool `json:"clustered" yaml:"clustered"`
 }
 
 // ServerPut represents the modifiable fields of a LXD server configuration

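Nothing fancy on the wire: the new field just rides along in the GET /1.0 server environment. A hedged sketch of what a client decodes, with a hand-written payload standing in for a real server response:

package main

import (
	"encoding/json"
	"fmt"
)

// serverEnvironment is a trimmed-down copy of api.ServerEnvironment with
// just the field added by this patch.
type serverEnvironment struct {
	ServerVersion string `json:"server_version"`
	Clustered     bool   `json:"clustered"`
}

func main() {
	// Hypothetical payload, as a bootstrapped node might return it.
	payload := []byte(`{"server_version": "2.21", "clustered": true}`)

	var env serverEnvironment
	if err := json.Unmarshal(payload, &env); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Printf("version=%s clustered=%v\n", env.ServerVersion, env.Clustered)
}
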
From d1831313ec7b0490063f774fe66dc68a358bbcb2 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 23 Nov 2017 08:10:53 +0000
Subject: [PATCH 103/227] Add NODE column to 'lxc list' when clustered

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxc/list.go               | 29 ++++++++++++++++++++++++-----
 test/main.sh              |  3 ++-
 test/suites/clustering.sh |  3 ++-
 3 files changed, 28 insertions(+), 7 deletions(-)

diff --git a/lxc/list.go b/lxc/list.go
index 4796af0d4..005df23a7 100644
--- a/lxc/list.go
+++ b/lxc/list.go
@@ -105,6 +105,8 @@ Pre-defined column shorthand chars:
 
 	t - Type (persistent or ephemeral)
 
+	N - Node hosting the container
+
 Custom columns are defined with "key[:name][:maxWidth]":
 
 	KEY: The (extended) config key to display
@@ -126,9 +128,11 @@ lxc list -c ns,user.comment:comment
 	List images with their running state and user comment. `)
 }
 
+const defaultColumns = "ns46tSN"
+
 func (c *listCmd) flags() {
-	gnuflag.StringVar(&c.columnsRaw, "c", "ns46tS", i18n.G("Columns"))
-	gnuflag.StringVar(&c.columnsRaw, "columns", "ns46tS", i18n.G("Columns"))
+	gnuflag.StringVar(&c.columnsRaw, "c", defaultColumns, i18n.G("Columns"))
+	gnuflag.StringVar(&c.columnsRaw, "columns", defaultColumns, i18n.G("Columns"))
 	gnuflag.StringVar(&c.format, "format", "table", i18n.G("Format (csv|json|table|yaml)"))
 	gnuflag.BoolVar(&c.fast, "fast", false, i18n.G("Fast mode (same as --columns=nsacPt)"))
 }
@@ -448,7 +452,7 @@ func (c *listCmd) run(conf *config.Config, args []string) error {
 		cts = append(cts, cinfo)
 	}
 
-	columns, err := c.parseColumns()
+	columns, err := c.parseColumns(d.IsClustered())
 	if err != nil {
 		return err
 	}
@@ -456,7 +460,7 @@ func (c *listCmd) run(conf *config.Config, args []string) error {
 	return c.listContainers(conf, remote, cts, filters, columns)
 }
 
-func (c *listCmd) parseColumns() ([]column, error) {
+func (c *listCmd) parseColumns(clustered bool) ([]column, error) {
 	columnsShorthandMap := map[rune]column{
 		'4': {i18n.G("IPV4"), c.IP4ColumnData, true, false},
 		'6': {i18n.G("IPV6"), c.IP6ColumnData, true, false},
@@ -475,7 +479,7 @@ func (c *listCmd) parseColumns() ([]column, error) {
 	}
 
 	if c.fast {
-		if c.columnsRaw != "ns46tS" {
+		if c.columnsRaw != defaultColumns {
 			// --columns was specified too
 			return nil, fmt.Errorf("Can't specify --fast with --columns")
 		} else {
@@ -483,6 +487,18 @@ func (c *listCmd) parseColumns() ([]column, error) {
 		}
 	}
 
+	if clustered {
+		columnsShorthandMap['N'] = column{
+			i18n.G("NODE"), c.nodeColumnData, false, false}
+	} else {
+		if c.columnsRaw != defaultColumns {
+			if strings.ContainsAny(c.columnsRaw, "N") {
+				return nil, fmt.Errorf("Can't specify column N when not clustered")
+			}
+		}
+		c.columnsRaw = strings.Replace(c.columnsRaw, "N", "", -1)
+	}
+
 	columnList := strings.Split(c.columnsRaw, ",")
 
 	columns := []column{}
@@ -685,5 +701,8 @@ func (c *listCmd) NumberOfProcessesColumnData(cInfo api.Container, cState *api.C
 	}
 
 	return ""
+}
 
+func (c *listCmd) nodeColumnData(cInfo api.Container, cState *api.ContainerState, cSnaps []api.ContainerSnapshot) string {
+	return cInfo.Node
 }
diff --git a/test/main.sh b/test/main.sh
index 96c5b99a4..c65d63f4b 100755
--- a/test/main.sh
+++ b/test/main.sh
@@ -194,7 +194,8 @@ run_test test_kernel_limits "kernel limits"
 run_test test_macaroon_auth "macaroon authentication"
 run_test test_console "console"
 run_test test_proxy_device "proxy device"
-run_test test_clustering "clustering"
+run_test test_clustering_membership "clustering"
+run_test test_clustering_containers "clustering"
 
 # shellcheck disable=SC2034
 TEST_RESULT=success
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index bab1576d2..8b39b5dd3 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -1,4 +1,4 @@
-test_clustering() {
+test_clustering_membership() {
   setup_clustering_bridge
   prefix="lxd$$"
   bridge="${prefix}"
@@ -134,6 +134,7 @@ test_clustering_containers() {
 
   # The container is visible through both nodes
   LXD_DIR="${LXD_ONE_DIR}" lxc list | grep foo | grep -q STOPPED
+  LXD_DIR="${LXD_ONE_DIR}" lxc list | grep foo | grep -q node2
   LXD_DIR="${LXD_TWO_DIR}" lxc list | grep foo | grep -q STOPPED
 
   # A Node: field indicates on which node the container is running

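The subtle part of this patch is that the default column string now contains 'N', so the client must drop it again when the server is not clustered, while still rejecting an explicit 'N' in that case. A toy standalone version of that decision:

package main

import (
	"fmt"
	"strings"
)

const defaultColumns = "ns46tSN"

// effectiveColumns mimics the check in parseColumns: an explicit 'N' is an
// error against a non-clustered server, but the implicit default is fine
// and the column is silently dropped.
func effectiveColumns(raw string, clustered bool) (string, error) {
	if clustered {
		return raw, nil
	}
	if raw != defaultColumns && strings.ContainsAny(raw, "N") {
		return "", fmt.Errorf("can't specify column N when not clustered")
	}
	return strings.Replace(raw, "N", "", -1), nil
}

func main() {
	cols, _ := effectiveColumns(defaultColumns, false)
	fmt.Println(cols) // ns46tS

	_, err := effectiveColumns("nsN", false)
	fmt.Println(err) // can't specify column N when not clustered
}
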
From cba46aaf863a188d26df8a863fae4bb23274bb48 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 4 Dec 2017 09:27:00 +0000
Subject: [PATCH 104/227] Fix clustering integration tests

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 test/main.sh              | 4 ++--
 test/suites/clustering.sh | 6 ++++++
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/test/main.sh b/test/main.sh
index c65d63f4b..80905322a 100755
--- a/test/main.sh
+++ b/test/main.sh
@@ -194,8 +194,8 @@ run_test test_kernel_limits "kernel limits"
 run_test test_macaroon_auth "macaroon authentication"
 run_test test_console "console"
 run_test test_proxy_device "proxy device"
-run_test test_clustering_membership "clustering"
-run_test test_clustering_containers "clustering"
+run_test test_clustering_membership "clustering membership"
+run_test test_clustering_containers "clustering containers"
 
 # shellcheck disable=SC2034
 TEST_RESULT=success
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 8b39b5dd3..5c90dc980 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -98,6 +98,9 @@ test_clustering_membership() {
   rm -f "${LXD_THREE_DIR}/unix.socket"
   rm -f "${LXD_TWO_DIR}/unix.socket"
   rm -f "${LXD_ONE_DIR}/unix.socket"
+
+  teardown_clustering_netns
+  teardown_clustering_bridge
 }
 
 test_clustering_containers() {
@@ -160,4 +163,7 @@ test_clustering_containers() {
   rm -f "${LXD_THREE_DIR}/unix.socket"
   rm -f "${LXD_TWO_DIR}/unix.socket"
   rm -f "${LXD_ONE_DIR}/unix.socket"
+
+  teardown_clustering_netns
+  teardown_clustering_bridge
 }

From 5dd06a5d6777c4df006a144b2999f199e6889e4f Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 5 Dec 2017 11:59:36 +0000
Subject: [PATCH 105/227] Add storage_pools_nodes table and storage_pools.state
 column

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/cluster/schema.go      | 11 ++++++++++-
 lxd/db/cluster/update.go      | 18 ++++++++++++++++++
 lxd/db/cluster/update_test.go | 19 +++++++++++++++++++
 lxd/db/storage_pools.go       | 16 +++++++++++++++-
 test/includes/lxd.sh          |  1 +
 5 files changed, 63 insertions(+), 2 deletions(-)

diff --git a/lxd/db/cluster/schema.go b/lxd/db/cluster/schema.go
index a38e7895c..088ba8183 100644
--- a/lxd/db/cluster/schema.go
+++ b/lxd/db/cluster/schema.go
@@ -193,6 +193,7 @@ CREATE TABLE storage_pools (
     name TEXT NOT NULL,
     driver TEXT NOT NULL,
     description TEXT,
+    state INTEGER NOT NULL DEFAULT 0,
     UNIQUE (name)
 );
 CREATE TABLE storage_pools_config (
@@ -205,6 +206,14 @@ CREATE TABLE storage_pools_config (
     FOREIGN KEY (storage_pool_id) REFERENCES storage_pools (id) ON DELETE CASCADE,
     FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
 );
+CREATE TABLE storage_pools_nodes (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    storage_pool_id INTEGER NOT NULL,
+    node_id INTEGER NOT NULL,
+    UNIQUE (storage_pool_id, node_id),
+    FOREIGN KEY (storage_pool_id) REFERENCES storage_pools (id) ON DELETE CASCADE,
+    FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
+);
 CREATE TABLE storage_volumes (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
     name TEXT NOT NULL,
@@ -225,5 +234,5 @@ CREATE TABLE storage_volumes_config (
     FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
 );
 
-INSERT INTO schema (version, updated_at) VALUES (3, strftime("%s"))
+INSERT INTO schema (version, updated_at) VALUES (4, strftime("%s"))
 `
diff --git a/lxd/db/cluster/update.go b/lxd/db/cluster/update.go
index 0fc4f7df0..40b0b60c9 100644
--- a/lxd/db/cluster/update.go
+++ b/lxd/db/cluster/update.go
@@ -26,6 +26,24 @@ var updates = map[int]schema.Update{
 	1: updateFromV0,
 	2: updateFromV1,
 	3: updateFromV2,
+	4: updateFromV3,
+}
+
+func updateFromV3(tx *sql.Tx) error {
+	stmt := `
+CREATE TABLE storage_pools_nodes (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    storage_pool_id INTEGER NOT NULL,
+    node_id INTEGER NOT NULL,
+    UNIQUE (storage_pool_id, node_id),
+    FOREIGN KEY (storage_pool_id) REFERENCES storage_pools (id) ON DELETE CASCADE,
+    FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
+);
+ALTER TABLE storage_pools ADD COLUMN state INTEGER NOT NULL DEFAULT 0;
+UPDATE storage_pools SET state = 1;
+`
+	_, err := tx.Exec(stmt)
+	return err
 }
 
 func updateFromV2(tx *sql.Tx) error {
diff --git a/lxd/db/cluster/update_test.go b/lxd/db/cluster/update_test.go
index 8f43a1df1..c015e85a6 100644
--- a/lxd/db/cluster/update_test.go
+++ b/lxd/db/cluster/update_test.go
@@ -205,3 +205,22 @@ func TestUpdateFromV2(t *testing.T) {
 	require.NoError(t, err)
 	assert.Equal(t, int64(0), n)
 }
+
+func TestUpdateFromV3(t *testing.T) {
+	schema := cluster.Schema()
+	db, err := schema.ExerciseUpdate(4, nil)
+	require.NoError(t, err)
+
+	_, err = db.Exec("INSERT INTO nodes VALUES (1, 'c1', '', '1.1.1.1', 666, 999, ?)", time.Now())
+	require.NoError(t, err)
+
+	_, err = db.Exec("INSERT INTO storage_pools VALUES (1, 'p1', 'zfs', '', 0)")
+	require.NoError(t, err)
+
+	_, err = db.Exec("INSERT INTO storage_pools_nodes VALUES (1, 1, 1)")
+	require.NoError(t, err)
+
+	// Unique constraint on storage_pool_id/node_id
+	_, err = db.Exec("INSERT INTO storage_pools_nodes VALUES (1, 1, 1)")
+	require.Error(t, err)
+}
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index a32263e24..498c8685e 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -62,6 +62,12 @@ func (c *ClusterTx) StoragePoolConfigAdd(poolID, nodeID int64, config map[string
 	return storagePoolConfigAdd(c.tx, poolID, nodeID, config)
 }
 
+// Storage pools state.
+const (
+	storagePoolPending int = iota // Storage pool defined but not yet created.
+	storagePoolCreated            // Storage pool created on all nodes.
+)
+
 // Get all storage pools.
 func (c *Cluster) StoragePools() ([]string, error) {
 	var name string
@@ -191,7 +197,7 @@ func (c *Cluster) StoragePoolCreate(poolName string, poolDescription string, poo
 		return -1, err
 	}
 
-	result, err := tx.Exec("INSERT INTO storage_pools (name, description, driver) VALUES (?, ?, ?)", poolName, poolDescription, poolDriver)
+	result, err := tx.Exec("INSERT INTO storage_pools (name, description, driver, state) VALUES (?, ?, ?, ?)", poolName, poolDescription, poolDriver, storagePoolCreated)
 	if err != nil {
 		tx.Rollback()
 		return -1, err
@@ -203,6 +209,14 @@ func (c *Cluster) StoragePoolCreate(poolName string, poolDescription string, poo
 		return -1, err
 	}
 
+	// Insert a node-specific entry pointing to ourselves.
+	columns := []string{"storage_pool_id", "node_id"}
+	values := []interface{}{id, c.nodeID}
+	_, err = query.UpsertObject(tx, "storage_pools_nodes", columns, values)
+	if err != nil {
+		return -1, err
+	}
+
 	err = storagePoolConfigAdd(tx, id, c.nodeID, poolConfig)
 	if err != nil {
 		tx.Rollback()
diff --git a/test/includes/lxd.sh b/test/includes/lxd.sh
index a03684701..bd3573fd4 100644
--- a/test/includes/lxd.sh
+++ b/test/includes/lxd.sh
@@ -209,6 +209,7 @@ kill_lxd() {
         check_empty_table "${daemon_dir}/raft/db.bin" "profiles_devices"
         check_empty_table "${daemon_dir}/raft/db.bin" "profiles_devices_config"
         check_empty_table "${daemon_dir}/raft/db.bin" "storage_pools"
+        check_empty_table "${daemon_dir}/raft/db.bin" "storage_pools_nodes"
         check_empty_table "${daemon_dir}/raft/db.bin" "storage_pools_config"
         check_empty_table "${daemon_dir}/raft/db.bin" "storage_volumes"
         check_empty_table "${daemon_dir}/raft/db.bin" "storage_volumes_config"

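Two details of the migration are worth noting: the new column carries a DEFAULT so the ALTER TABLE succeeds on populated databases, and the UPDATE backfills pre-existing pools to the created state so they aren't suddenly treated as pending. A runnable sketch of the same sequence against an in-memory SQLite database, using the go-sqlite3 driver the tree already imports:

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Pre-migration state: one existing pool, no state column yet.
	mustExec(db, "CREATE TABLE storage_pools (id INTEGER PRIMARY KEY, name TEXT)")
	mustExec(db, "INSERT INTO storage_pools(name) VALUES ('default')")

	// The migration: add the column with a default, then backfill.
	mustExec(db, "ALTER TABLE storage_pools ADD COLUMN state INTEGER NOT NULL DEFAULT 0")
	mustExec(db, "UPDATE storage_pools SET state = 1")

	var state int
	err = db.QueryRow("SELECT state FROM storage_pools WHERE name='default'").Scan(&state)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("state:", state) // state: 1 (CREATED, not PENDING)
}

func mustExec(db *sql.DB, stmt string) {
	if _, err := db.Exec(stmt); err != nil {
		log.Fatal(err)
	}
}
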
From 61bbe859ddce49a5395119c0a73b5af9c1981296 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 5 Dec 2017 12:30:08 +0000
Subject: [PATCH 106/227] Add db.NodesCount to return the number of existing
 nodes

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/membership.go      | 12 ++++++++++++
 lxd/cluster/membership_test.go | 13 +++++++++++++
 lxd/db/node.go                 | 12 ++++++++++++
 lxd/db/node_test.go            | 16 ++++++++++++++++
 4 files changed, 53 insertions(+)

diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index db1a0efc6..9602c2dff 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -482,6 +482,18 @@ func List(state *state.State) ([]db.NodeInfo, map[int64]bool, error) {
 	return nodes, flags, nil
 }
 
+// Count is a convenience function for checking the current number of nodes
+// in the cluster.
+func Count(state *state.State) (int, error) {
+	var count int
+	err := state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		var err error
+		count, err = tx.NodesCount()
+		return err
+	})
+	return count, err
+}
+
 // Check that node-related preconditions are met for bootstrapping or joining a
 // cluster.
 func membershipCheckNodeStateForBootstrapOrJoin(tx *db.NodeTx, address string) error {
diff --git a/lxd/cluster/membership_test.go b/lxd/cluster/membership_test.go
index b454e7824..f38f43c65 100644
--- a/lxd/cluster/membership_test.go
+++ b/lxd/cluster/membership_test.go
@@ -127,6 +127,10 @@ func TestBootstrap(t *testing.T) {
 	conn, err := driver.Open("test.db")
 	require.NoError(t, err)
 	require.NoError(t, conn.Close())
+
+	count, err := cluster.Count(state)
+	require.NoError(t, err)
+	assert.Equal(t, 1, count)
 }
 
 // If pre-conditions are not met, a descriptive error is returned.
@@ -306,6 +310,11 @@ func TestJoin(t *testing.T) {
 	assert.True(t, flags[1])
 	assert.True(t, flags[2])
 
+	// The Count function returns the number of nodes.
+	count, err := cluster.Count(state)
+	require.NoError(t, err)
+	assert.Equal(t, 2, count)
+
 	// Leave the cluster.
 	leaving, err := cluster.Leave(state, gateway, "rusp", false /* force */)
 	require.NoError(t, err)
@@ -325,6 +334,10 @@ func TestJoin(t *testing.T) {
 	future := raft.GetConfiguration()
 	require.NoError(t, future.Error())
 	assert.Len(t, future.Configuration().Servers, 1)
+
+	count, err = cluster.Count(state)
+	require.NoError(t, err)
+	assert.Equal(t, 1, count)
 }
 
 // Helper for setting fixtures for Bootstrap tests.
diff --git a/lxd/db/node.go b/lxd/db/node.go
index 2982bd884..172d79448 100644
--- a/lxd/db/node.go
+++ b/lxd/db/node.go
@@ -69,6 +69,18 @@ func (c *ClusterTx) Nodes() ([]NodeInfo, error) {
 	return c.nodes("")
 }
 
+// NodesCount returns the number of nodes in the LXD cluster.
+//
+// Since there's always at least one node row, even when not clustered, the
+// return value is greater than zero.
+func (c *ClusterTx) NodesCount() (int, error) {
+	count, err := query.Count(c.tx, "nodes", "")
+	if err != nil {
+		return 0, errors.Wrap(err, "failed to count existing nodes")
+	}
+	return count, nil
+}
+
 // NodeRename changes the name of an existing node.
 //
 // An error is returned if a node with the same name already exists.
diff --git a/lxd/db/node_test.go b/lxd/db/node_test.go
index 84a6bceff..d95363746 100644
--- a/lxd/db/node_test.go
+++ b/lxd/db/node_test.go
@@ -37,6 +37,22 @@ func TestNodeAdd(t *testing.T) {
 	assert.Equal(t, "buzz", node.Name)
 }
 
+func TestNodesCount(t *testing.T) {
+	tx, cleanup := db.NewTestClusterTx(t)
+	defer cleanup()
+
+	count, err := tx.NodesCount()
+	require.NoError(t, err)
+	assert.Equal(t, 1, count) // There's always at least one node.
+
+	_, err = tx.NodeAdd("buzz", "1.2.3.4:666")
+	require.NoError(t, err)
+
+	count, err = tx.NodesCount()
+	require.NoError(t, err)
+	assert.Equal(t, 2, count)
+}
+
 // Rename a node.
 func TestNodeRename(t *testing.T) {
 	tx, cleanup := db.NewTestClusterTx(t)

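NodesCount delegates to the query.Count helper. Its implementation isn't shown in this series, so the version below is reconstructed from the call sites (a COUNT(*) with an optional WHERE clause) and should be read as an approximation, not as the helper's actual source:

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/mattn/go-sqlite3"
)

// count approximates query.Count as used by NodesCount and NodeRename.
func count(tx *sql.Tx, table string, where string, args ...interface{}) (int, error) {
	stmt := fmt.Sprintf("SELECT COUNT(*) FROM %s", table)
	if where != "" {
		stmt += " WHERE " + where
	}
	var n int
	err := tx.QueryRow(stmt, args...).Scan(&n)
	return n, err
}

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	db.Exec("CREATE TABLE nodes (id INTEGER PRIMARY KEY, name TEXT)")
	db.Exec("INSERT INTO nodes(name) VALUES ('none'), ('buzz')")

	tx, _ := db.Begin()
	defer tx.Commit()

	n, err := count(tx, "nodes", "")
	fmt.Println(n, err) // 2 <nil>

	n, err = count(tx, "nodes", "name=?", "buzz")
	fmt.Println(n, err) // 1 <nil>
}
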
From 197fc475733862d2ff40941f4760270a35efd7e4 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 5 Dec 2017 15:13:50 +0000
Subject: [PATCH 107/227] Change db.SelectConfig to support params substitution

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/config.go            | 4 ++--
 lxd/db/networks.go          | 2 +-
 lxd/db/query/config.go      | 8 ++++----
 lxd/db/query/config_test.go | 8 ++++----
 lxd/db/storage_pools.go     | 3 +--
 5 files changed, 12 insertions(+), 13 deletions(-)

diff --git a/lxd/db/config.go b/lxd/db/config.go
index d76d8188a..5a44dd1db 100644
--- a/lxd/db/config.go
+++ b/lxd/db/config.go
@@ -4,7 +4,7 @@ import "github.com/lxc/lxd/lxd/db/query"
 
 // Config fetches all LXD node-level config keys.
 func (n *NodeTx) Config() (map[string]string, error) {
-	return query.SelectConfig(n.tx, "config")
+	return query.SelectConfig(n.tx, "config", "")
 }
 
 // UpdateConfig updates the given LXD node-level configuration keys in the
@@ -15,7 +15,7 @@ func (n *NodeTx) UpdateConfig(values map[string]string) error {
 
 // Config fetches all LXD cluster config keys.
 func (c *ClusterTx) Config() (map[string]string, error) {
-	return query.SelectConfig(c.tx, "config")
+	return query.SelectConfig(c.tx, "config", "")
 }
 
 // UpdateConfig updates the given LXD cluster configuration keys in the
diff --git a/lxd/db/networks.go b/lxd/db/networks.go
index 7627a63c5..e08db3736 100644
--- a/lxd/db/networks.go
+++ b/lxd/db/networks.go
@@ -21,7 +21,7 @@ func (c *ClusterTx) NetworkConfigs() (map[string]map[string]string, error) {
 	networks := make(map[string]map[string]string, len(names))
 	for _, name := range names {
 		table := "networks_config JOIN networks ON networks.id=networks_config.network_id"
-		config, err := query.SelectConfig(c.tx, table, fmt.Sprintf("networks.name='%s'", name))
+		config, err := query.SelectConfig(c.tx, table, "networks.name=?", name)
 		if err != nil {
 			return nil, err
 		}
diff --git a/lxd/db/query/config.go b/lxd/db/query/config.go
index 878b6d8f0..94acf49ea 100644
--- a/lxd/db/query/config.go
+++ b/lxd/db/query/config.go
@@ -11,13 +11,13 @@ import (
 // additional WHERE filters can be specified.
 //
 // Returns a map of key names to their associated values.
-func SelectConfig(tx *sql.Tx, table string, filters ...string) (map[string]string, error) {
+func SelectConfig(tx *sql.Tx, table string, where string, args ...interface{}) (map[string]string, error) {
 	query := fmt.Sprintf("SELECT key, value FROM %s", table)
-	if len(filters) > 0 {
-		query += " WHERE " + strings.Join(filters, " ")
+	if where != "" {
+		query += fmt.Sprintf(" WHERE %s", where)
 	}
 
-	rows, err := tx.Query(query)
+	rows, err := tx.Query(query, args...)
 	if err != nil {
 		return nil, err
 	}
diff --git a/lxd/db/query/config_test.go b/lxd/db/query/config_test.go
index fe09735e3..d77026d17 100644
--- a/lxd/db/query/config_test.go
+++ b/lxd/db/query/config_test.go
@@ -11,14 +11,14 @@ import (
 
 func TestSelectConfig(t *testing.T) {
 	tx := newTxForConfig(t)
-	values, err := query.SelectConfig(tx, "test")
+	values, err := query.SelectConfig(tx, "test", "")
 	require.NoError(t, err)
 	assert.Equal(t, map[string]string{"foo": "x", "bar": "zz"}, values)
 }
 
 func TestSelectConfig_WithFilters(t *testing.T) {
 	tx := newTxForConfig(t)
-	values, err := query.SelectConfig(tx, "test", "key='bar'")
+	values, err := query.SelectConfig(tx, "test", "key=?", "bar")
 	require.NoError(t, err)
 	assert.Equal(t, map[string]string{"bar": "zz"}, values)
 }
@@ -31,7 +31,7 @@ func TestUpdateConfig_NewKeys(t *testing.T) {
 	err := query.UpdateConfig(tx, "test", values)
 	require.NoError(t, err)
 
-	values, err = query.SelectConfig(tx, "test")
+	values, err = query.SelectConfig(tx, "test", "")
 	require.NoError(t, err)
 	assert.Equal(t, map[string]string{"foo": "y", "bar": "zz"}, values)
 }
@@ -44,7 +44,7 @@ func TestDeleteConfig_Delete(t *testing.T) {
 	err := query.UpdateConfig(tx, "test", values)
 
 	require.NoError(t, err)
-	values, err = query.SelectConfig(tx, "test")
+	values, err = query.SelectConfig(tx, "test", "")
 	require.NoError(t, err)
 	assert.Equal(t, map[string]string{"bar": "zz"}, values)
 }
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index 498c8685e..7d154c666 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -22,8 +22,7 @@ func (c *ClusterTx) StoragePoolConfigs() (map[string]map[string]string, error) {
 		table := `
 storage_pools_config JOIN storage_pools ON storage_pools.id=storage_pools_config.storage_pool_id
 `
-		filter := fmt.Sprintf("storage_pools.name='%s'", name)
-		config, err := query.SelectConfig(c.tx, table, filter)
+		config, err := query.SelectConfig(c.tx, table, "storage_pools.name=?", name)
 		if err != nil {
 			return nil, err
 		}

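The networks.go hunk shows the motivation for this change: the old code interpolated the name into the SQL string with fmt.Sprintf, which breaks on names containing quotes and is an injection hazard. A small demonstration of the difference against in-memory SQLite:

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	db.Exec("CREATE TABLE test (key TEXT, value TEXT)")
	db.Exec("INSERT INTO test VALUES ('it''s', 'x')")

	name := "it's"

	// Before: string interpolation chokes on the embedded quote.
	_, err = db.Query(fmt.Sprintf("SELECT key, value FROM test WHERE key='%s'", name))
	fmt.Println("interpolated:", err) // a syntax error

	// After: a ? placeholder with a bound argument handles any value.
	rows, err := db.Query("SELECT key, value FROM test WHERE key=?", name)
	fmt.Println("parameterized:", err) // <nil>
	if rows != nil {
		rows.Close()
	}
}
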
From 5a074eaffaa80700116923a34af4ee51a8d512b2 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 5 Dec 2017 15:31:35 +0000
Subject: [PATCH 108/227] Update storage_pools_nodes when a node joins the
 cluster

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/membership.go |  6 +++++-
 lxd/db/storage_pools.go   | 20 +++++++++++++++++++-
 2 files changed, 24 insertions(+), 2 deletions(-)

diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index 9602c2dff..447d189a4 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -318,10 +318,14 @@ func Join(state *state.State, gateway *Gateway, cert *shared.CertInfo, name stri
 			if !ok {
 				return fmt.Errorf("joining node has no config for pool %s", name)
 			}
+			err := tx.StoragePoolNodeJoin(id, node.ID)
+			if err != nil {
+				return errors.Wrap(err, "failed to add joining node to the pool")
+			}
 			// We only need to add the source key, since the other keys are global and
 			// are already there.
 			config = map[string]string{"source": config["source"]}
-			err := tx.StoragePoolConfigAdd(id, node.ID, config)
+			err = tx.StoragePoolConfigAdd(id, node.ID, config)
 			if err != nil {
 				return errors.Wrap(err, "failed to add joining node's pool config")
 			}
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index 7d154c666..d8e3dd708 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -12,6 +12,10 @@ import (
 
 // StoragePoolConfigs returns a map associating each storage pool name to its
 // config values.
+//
+// The config values are the ones defined for the node this function is run
+// on. They are used by cluster.Join when a new node joins the cluster and its
+// configuration needs to be migrated to the cluster database.
 func (c *ClusterTx) StoragePoolConfigs() (map[string]map[string]string, error) {
 	names, err := query.SelectStrings(c.tx, "SELECT name FROM storage_pools")
 	if err != nil {
@@ -22,7 +26,9 @@ func (c *ClusterTx) StoragePoolConfigs() (map[string]map[string]string, error) {
 		table := `
 storage_pools_config JOIN storage_pools ON storage_pools.id=storage_pools_config.storage_pool_id
 `
-		config, err := query.SelectConfig(c.tx, table, "storage_pools.name=?", name)
+		config, err := query.SelectConfig(
+			c.tx, table, "storage_pools.name=? AND storage_pools_config.storage_pool_id=?",
+			name, c.nodeID)
 		if err != nil {
 			return nil, err
 		}
@@ -56,6 +62,18 @@ func (c *ClusterTx) StoragePoolIDs() (map[string]int64, error) {
 	return ids, nil
 }
 
+// StoragePoolNodeJoin adds a new entry in the storage_pools_nodes table.
+//
+// It should only be used when a new node joins the cluster, when it's safe to
+// assume that the relevant pool has already been created on the joining node,
+// and we just need to track it.
+func (c *ClusterTx) StoragePoolNodeJoin(poolID, nodeID int64) error {
+	columns := []string{"storage_pool_id", "node_id"}
+	values := []interface{}{poolID, nodeID}
+	_, err := query.UpsertObject(c.tx, "storage_pools_nodes", columns, values)
+	return err
+}
+
 // StoragePoolConfigAdd adds a new entry in the storage_pools_config table
 func (c *ClusterTx) StoragePoolConfigAdd(poolID, nodeID int64, config map[string]string) error {
 	return storagePoolConfigAdd(c.tx, poolID, nodeID, config)

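StoragePoolNodeJoin is a thin wrapper over query.UpsertObject, whose source isn't part of this series. The stand-in below is therefore an assumption inferred from the call sites: build an INSERT OR REPLACE from parallel column/value slices and return the resulting row ID.

package main

import (
	"database/sql"
	"fmt"
	"log"
	"strings"

	_ "github.com/mattn/go-sqlite3"
)

// upsertObject is a guessed stand-in for query.UpsertObject.
func upsertObject(tx *sql.Tx, table string, columns []string, values []interface{}) (int64, error) {
	placeholders := strings.TrimSuffix(strings.Repeat("?, ", len(columns)), ", ")
	stmt := fmt.Sprintf("INSERT OR REPLACE INTO %s (%s) VALUES (%s)",
		table, strings.Join(columns, ", "), placeholders)
	result, err := tx.Exec(stmt, values...)
	if err != nil {
		return -1, err
	}
	return result.LastInsertId()
}

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	db.Exec(`CREATE TABLE storage_pools_nodes (
	    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
	    storage_pool_id INTEGER NOT NULL,
	    node_id INTEGER NOT NULL,
	    UNIQUE (storage_pool_id, node_id))`)

	tx, _ := db.Begin()
	id, err := upsertObject(tx, "storage_pools_nodes",
		[]string{"storage_pool_id", "node_id"}, []interface{}{1, 2})
	fmt.Println(id, err) // 1 <nil>
	tx.Commit()
}
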
From aa71b7d8669c90cec3b7c73fbf28088c285817ec Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 5 Dec 2017 15:33:01 +0000
Subject: [PATCH 109/227] Add db helper functions to create pending storage
 pools

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/storage_pools.go      | 173 +++++++++++++++++++++++++++++++++++++++++++
 lxd/db/storage_pools_test.go |  90 ++++++++++++++++++++++
 2 files changed, 263 insertions(+)
 create mode 100644 lxd/db/storage_pools_test.go

diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index d8e3dd708..8c85aecc1 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -3,10 +3,12 @@ package db
 import (
 	"database/sql"
 	"fmt"
+	"strings"
 
 	_ "github.com/mattn/go-sqlite3"
 
 	"github.com/lxc/lxd/lxd/db/query"
+	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
 )
 
@@ -37,6 +39,23 @@ storage_pools_config JOIN storage_pools ON storage_pools.id=storage_pools_config
 	return pools, nil
 }
 
+// StoragePoolID returns the ID of the pool with the given name.
+func (c *ClusterTx) StoragePoolID(name string) (int64, error) {
+	stmt := "SELECT id FROM storage_pools WHERE name=?"
+	ids, err := query.SelectIntegers(c.tx, stmt, name)
+	if err != nil {
+		return -1, err
+	}
+	switch len(ids) {
+	case 0:
+		return -1, NoSuchObjectError
+	case 1:
+		return int64(ids[0]), nil
+	default:
+		return -1, fmt.Errorf("more than one pool has the given name")
+	}
+}
+
 // StoragePoolIDs returns a map associating each storage pool name to its ID.
 func (c *ClusterTx) StoragePoolIDs() (map[string]int64, error) {
 	pools := []struct {
@@ -83,8 +102,162 @@ func (c *ClusterTx) StoragePoolConfigAdd(poolID, nodeID int64, config map[string
 const (
 	storagePoolPending int = iota // Storage pool defined but not yet created.
 	storagePoolCreated            // Storage pool created on all nodes.
+	storagePoolErrored            // Storage pool creation failed on some nodes.
 )
 
+// StoragePoolCreatePending creates a new pending storage pool with the given
+// name on the given node.
+func (c *ClusterTx) StoragePoolCreatePending(node, name, driver string, conf map[string]string) error {
+	// First check if a storage pool with the given name exists, and, if
+	// so, that it has a matching driver and it's in the pending state.
+	pool := struct {
+		id     int64
+		driver string
+		state  int
+	}{}
+
+	var errConsistency error
+	dest := func(i int) []interface{} {
+		// Sanity check that there is at most one pool with the given name.
+		if i != 0 {
+			errConsistency = fmt.Errorf("more than one pool exists with the given name")
+		}
+		return []interface{}{&pool.id, &pool.driver, &pool.state}
+	}
+	stmt := "SELECT id, driver, state FROM storage_pools WHERE name=?"
+	err := query.SelectObjects(c.tx, dest, stmt, name)
+	if err != nil {
+		return err
+	}
+	if errConsistency != nil {
+		return errConsistency
+	}
+
+	var poolID = pool.id
+	if poolID == 0 {
+		// No existing pool with the given name was found, let's create
+		// one.
+		columns := []string{"name", "driver"}
+		values := []interface{}{name, driver}
+		poolID, err = query.UpsertObject(c.tx, "storage_pools", columns, values)
+		if err != nil {
+			return err
+		}
+	} else {
+		// Check that the existing pool matches the given driver and
+		// is in the pending state.
+		if pool.driver != driver {
+			return fmt.Errorf("pool already exists with a different driver")
+		}
+		if pool.state != storagePoolPending {
+			return fmt.Errorf("pool is not in pending state")
+		}
+	}
+
+	// Get the ID of the node with the given name.
+	nodeInfo, err := c.NodeByName(node)
+	if err != nil {
+		return err
+	}
+
+	// Check that no storage_pools_nodes entry for this node and pool exists yet.
+	count, err := query.Count(
+		c.tx, "storage_pools_nodes", "storage_pool_id=? AND node_id=?", poolID, nodeInfo.ID)
+	if err != nil {
+		return err
+	}
+	if count != 0 {
+		return DbErrAlreadyDefined
+	}
+
+	// Insert the node-specific entry and its configuration.
+	columns := []string{"storage_pool_id", "node_id"}
+	values := []interface{}{poolID, nodeInfo.ID}
+	_, err = query.UpsertObject(c.tx, "storage_pools_nodes", columns, values)
+	if err != nil {
+		return err
+	}
+	err = c.StoragePoolConfigAdd(poolID, nodeInfo.ID, conf)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// StoragePoolCreated sets the state of the given pool to "CREATED".
+func (c *ClusterTx) StoragePoolCreated(name string) error {
+	return c.storagePoolState(name, storagePoolCreated)
+}
+
+// StoragePoolErrored sets the state of the given pool to "ERRORED".
+func (c *ClusterTx) StoragePoolErrored(name string) error {
+	return c.storagePoolState(name, storagePoolErrored)
+}
+
+func (c *ClusterTx) storagePoolState(name string, state int) error {
+	stmt := "UPDATE storage_pools SET state=? WHERE name=?"
+	result, err := c.tx.Exec(stmt, state, name)
+	if err != nil {
+		return err
+	}
+	n, err := result.RowsAffected()
+	if err != nil {
+		return err
+	}
+	if n != 1 {
+		return NoSuchObjectError
+	}
+	return nil
+}
+
+// StoragePoolNodeConfigs returns the node-specific configuration of all
+// nodes grouped by node name, for the given poolID.
+//
+// If the storage pool is not defined on all nodes, an error is returned.
+func (c *ClusterTx) StoragePoolNodeConfigs(poolID int64) (map[string]map[string]string, error) {
+	// Fetch all nodes.
+	nodes, err := c.Nodes()
+	if err != nil {
+		return nil, err
+	}
+
+	// Fetch the names of the nodes where the storage pool is defined.
+	stmt := `
+SELECT nodes.name FROM nodes
+  LEFT JOIN storage_pools_nodes ON storage_pools_nodes.node_id = nodes.id
+  LEFT JOIN storage_pools ON storage_pools_nodes.storage_pool_id = storage_pools.id
+WHERE storage_pools.id = ? AND storage_pools.state = ?
+`
+	defined, err := query.SelectStrings(c.tx, stmt, poolID, storagePoolPending)
+	if err != nil {
+		return nil, err
+	}
+
+	// Figure out which nodes are missing.
+	missing := []string{}
+	for _, node := range nodes {
+		if !shared.StringInSlice(node.Name, defined) {
+			missing = append(missing, node.Name)
+		}
+	}
+
+	if len(missing) > 0 {
+		return nil, fmt.Errorf("Pool not defined on nodes: %s", strings.Join(missing, ", "))
+	}
+
+	configs := map[string]map[string]string{}
+	for _, node := range nodes {
+		config, err := query.SelectConfig(c.tx, "storage_pools_config", "storage_pool_id=? AND node_id=?", poolID, node.ID)
+		if err != nil {
+			return nil, err
+		}
+		configs[node.Name] = config
+	}
+
+	return configs, nil
+}
+
 // Get all storage pools.
 func (c *Cluster) StoragePools() ([]string, error) {
 	var name string
diff --git a/lxd/db/storage_pools_test.go b/lxd/db/storage_pools_test.go
new file mode 100644
index 000000000..b4a06081d
--- /dev/null
+++ b/lxd/db/storage_pools_test.go
@@ -0,0 +1,90 @@
+package db_test
+
+import (
+	"testing"
+
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestStoragePoolsCreatePending(t *testing.T) {
+	tx, cleanup := db.NewTestClusterTx(t)
+	defer cleanup()
+
+	_, err := tx.NodeAdd("buzz", "1.2.3.4:666")
+	require.NoError(t, err)
+	_, err = tx.NodeAdd("rusp", "5.6.7.8:666")
+	require.NoError(t, err)
+
+	config := map[string]string{"source": "/foo"}
+	err = tx.StoragePoolCreatePending("buzz", "pool1", "dir", config)
+	require.NoError(t, err)
+
+	poolID, err := tx.StoragePoolID("pool1")
+	require.NoError(t, err)
+	assert.True(t, poolID > 0)
+
+	config = map[string]string{"source": "/bar"}
+	err = tx.StoragePoolCreatePending("rusp", "pool1", "dir", config)
+	require.NoError(t, err)
+
+	// The initial node (whose name is 'none' by default) is missing.
+	_, err = tx.StoragePoolNodeConfigs(poolID)
+	require.EqualError(t, err, "Pool not defined on nodes: none")
+
+	config = map[string]string{"source": "/egg"}
+	err = tx.StoragePoolCreatePending("none", "pool1", "dir", config)
+	require.NoError(t, err)
+
+	// Now the storage is defined on all nodes.
+	configs, err := tx.StoragePoolNodeConfigs(poolID)
+	require.NoError(t, err)
+	assert.Len(t, configs, 3)
+	assert.Equal(t, map[string]string{"source": "/foo"}, configs["buzz"])
+	assert.Equal(t, map[string]string{"source": "/bar"}, configs["rusp"])
+	assert.Equal(t, map[string]string{"source": "/egg"}, configs["none"])
+}
+
+// If an entry for the given pool and node already exists, an error is
+// returned.
+func TestStoragePoolsCreatePending_AlreadyDefined(t *testing.T) {
+	tx, cleanup := db.NewTestClusterTx(t)
+	defer cleanup()
+
+	_, err := tx.NodeAdd("buzz", "1.2.3.4:666")
+	require.NoError(t, err)
+
+	err = tx.StoragePoolCreatePending("buzz", "pool1", "dir", map[string]string{})
+	require.NoError(t, err)
+
+	err = tx.StoragePoolCreatePending("buzz", "pool1", "dir", map[string]string{})
+	require.Equal(t, db.DbErrAlreadyDefined, err)
+}
+
+// If no node with the given name is found, an error is returned.
+func TestStoragePoolsCreatePending_NonExistingNode(t *testing.T) {
+	tx, cleanup := db.NewTestClusterTx(t)
+	defer cleanup()
+
+	err := tx.StoragePoolCreatePending("buzz", "pool1", "dir", map[string]string{})
+	require.Equal(t, db.NoSuchObjectError, err)
+}
+
+// If a pool with the given name already exists but has different driver, an
+// error is returned.
+func TestStoragePoolsCreatePending_DriverMismatch(t *testing.T) {
+	tx, cleanup := db.NewTestClusterTx(t)
+	defer cleanup()
+
+	_, err := tx.NodeAdd("buzz", "1.2.3.4:666")
+	require.NoError(t, err)
+	_, err = tx.NodeAdd("rusp", "5.6.7.8:666")
+	require.NoError(t, err)
+
+	err = tx.StoragePoolCreatePending("buzz", "pool1", "dir", map[string]string{})
+	require.NoError(t, err)
+
+	err = tx.StoragePoolCreatePending("rusp", "pool1", "zfs", map[string]string{})
+	require.EqualError(t, err, "pool already exists with a different driver")
+}

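To make the intended lifecycle explicit: a pool is defined node by node while PENDING, and is only flipped to CREATED (or ERRORED) once every node has reported back. The toy in-memory model below mirrors the invariants StoragePoolCreatePending enforces; it is a sketch of the logic only, not of the database code:

package main

import "fmt"

const (
	poolPending int = iota
	poolCreated
	poolErrored
)

type pool struct {
	driver string
	state  int
	nodes  map[string]map[string]string // node name -> node-level config
}

// definePending mirrors StoragePoolCreatePending: the first call creates
// the pool in the pending state; later calls must match the driver, find
// the pool still pending, and not repeat a node.
func (p *pool) definePending(node, driver string, config map[string]string) error {
	if p.nodes == nil {
		p.driver = driver
		p.state = poolPending
		p.nodes = map[string]map[string]string{}
	}
	if p.driver != driver {
		return fmt.Errorf("pool already exists with a different driver")
	}
	if p.state != poolPending {
		return fmt.Errorf("pool is not in pending state")
	}
	if _, ok := p.nodes[node]; ok {
		return fmt.Errorf("pool already defined on node %s", node)
	}
	p.nodes[node] = config
	return nil
}

func main() {
	p := &pool{}
	fmt.Println(p.definePending("buzz", "dir", map[string]string{"source": "/foo"})) // <nil>
	fmt.Println(p.definePending("rusp", "zfs", nil)) // different driver
	fmt.Println(p.definePending("buzz", "dir", nil)) // already defined on node buzz

	p.state = poolCreated // flipped by StoragePoolCreated once all nodes are done
	fmt.Println(p.definePending("none", "dir", nil)) // not in pending state
}
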
From 30ff2bfb97003b60a3fb4b7b5cc114f1d2d31731 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 5 Dec 2017 15:59:48 +0000
Subject: [PATCH 110/227] Add targetNode query parameter to /1.0/storage-pools
 POST

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 client/lxd_storage_pools.go   |  6 +++++-
 doc/api-extensions.md         |  1 +
 lxd/.dir-locals.el            |  2 +-
 lxd/api_storage_pools_test.go | 40 ++++++++++++++++++++++++++++++++++++++
 lxd/db/storage_pools.go       | 14 ++++++++++++--
 lxd/storage_pools.go          | 45 ++++++++++++++++++++++++++++++++++++++++---
 shared/api/storage_pool.go    |  3 +++
 7 files changed, 104 insertions(+), 7 deletions(-)
 create mode 100644 lxd/api_storage_pools_test.go

diff --git a/client/lxd_storage_pools.go b/client/lxd_storage_pools.go
index 8e10288f1..717c0f22d 100644
--- a/client/lxd_storage_pools.go
+++ b/client/lxd_storage_pools.go
@@ -71,7 +71,11 @@ func (r *ProtocolLXD) CreateStoragePool(pool api.StoragePoolsPost) error {
 	}
 
 	// Send the request
-	_, _, err := r.query("POST", "/storage-pools", pool, "")
+	path := "/storage-pools"
+	if r.targetNode != "" {
+		path += fmt.Sprintf("?targetNode=%s", r.targetNode)
+	}
+	_, _, err := r.query("POST", path, pool, "")
 	if err != nil {
 		return err
 	}
diff --git a/doc/api-extensions.md b/doc/api-extensions.md
index d84940730..26c62e6b2 100644
--- a/doc/api-extensions.md
+++ b/doc/api-extensions.md
@@ -413,3 +413,4 @@ This includes the following new endpoints:
 The following existing endpoints have been modified:
 
  * `POST /1.0/containers` accepts a new targetNode query parameter
+ * `POST /1.0/storage-pools` accepts a new targetNode query parameter
diff --git a/lxd/.dir-locals.el b/lxd/.dir-locals.el
index bf09f9074..9342fb083 100644
--- a/lxd/.dir-locals.el
+++ b/lxd/.dir-locals.el
@@ -1,7 +1,7 @@
 ;;; Directory Local Variables
 ;;; For more information see (info "(emacs) Directory Variables")
 ((go-mode
-  . ((go-test-args . "-tags libsqlite3 -timeout 25s")
+  . ((go-test-args . "-tags libsqlite3 -timeout 35s")
      (eval
       . (set
 	 (make-local-variable 'flycheck-go-build-tags)
diff --git a/lxd/api_storage_pools_test.go b/lxd/api_storage_pools_test.go
new file mode 100644
index 000000000..8c572f487
--- /dev/null
+++ b/lxd/api_storage_pools_test.go
@@ -0,0 +1,40 @@
+package main
+
+import (
+	"testing"
+
+	"github.com/lxc/lxd/shared/api"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// Create a new pending storage pool using the targetNode query parameter.
+func TestStoragePoolsCreate_TargetNode(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping storage-pools targetNode test in short mode.")
+	}
+	daemons, cleanup := newDaemons(t, 2)
+	defer cleanup()
+
+	f := clusterFixture{t: t}
+	f.FormCluster(daemons)
+
+	daemon := daemons[0]
+	client := f.ClientUnix(daemon).ClusterTargetNode("rusp-0")
+
+	poolPost := api.StoragePoolsPost{
+		Name:   "mypool",
+		Driver: "dir",
+	}
+	poolPost.Config = map[string]string{
+		"source": "",
+	}
+
+	err := client.CreateStoragePool(poolPost)
+	require.NoError(t, err)
+
+	pool, _, err := client.GetStoragePool("mypool")
+	require.NoError(t, err)
+
+	assert.Equal(t, "PENDING", pool.State)
+}
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index 8c85aecc1..943b08f90 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -328,10 +328,11 @@ func (c *Cluster) StoragePoolGet(poolName string) (int64, *api.StoragePool, erro
 	var poolDriver string
 	poolID := int64(-1)
 	description := sql.NullString{}
+	var state int
 
-	query := "SELECT id, driver, description FROM storage_pools WHERE name=?"
+	query := "SELECT id, driver, description, state FROM storage_pools WHERE name=?"
 	inargs := []interface{}{poolName}
-	outargs := []interface{}{&poolID, &poolDriver, &description}
+	outargs := []interface{}{&poolID, &poolDriver, &description, &state}
 
 	err := dbQueryRowScan(c.db, query, inargs, outargs)
 	if err != nil {
@@ -353,6 +354,15 @@ func (c *Cluster) StoragePoolGet(poolName string) (int64, *api.StoragePool, erro
 	storagePool.Description = description.String
 	storagePool.Config = config
 
+	switch state {
+	case storagePoolPending:
+		storagePool.State = "PENDING"
+	case storagePoolCreated:
+		storagePool.State = "CREATED"
+	default:
+		storagePool.State = "UNKNOWN"
+	}
+
 	return poolID, &storagePool, nil
 }
 
diff --git a/lxd/storage_pools.go b/lxd/storage_pools.go
index 9b609fb12..a113b227e 100644
--- a/lxd/storage_pools.go
+++ b/lxd/storage_pools.go
@@ -9,6 +9,7 @@ import (
 	"sync"
 
 	"github.com/gorilla/mux"
+	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared/api"
@@ -88,12 +89,50 @@ func storagePoolsPost(d *Daemon, r *http.Request) Response {
 		return BadRequest(fmt.Errorf("No driver provided"))
 	}
 
-	err = storagePoolCreateInternal(d.State(), req.Name, req.Description, req.Driver, req.Config)
+	url := fmt.Sprintf("/%s/storage-pools/%s", version.APIVersion, req.Name)
+	response := SyncResponseLocation(true, nil, url)
+
+	targetNode := r.FormValue("targetNode")
+	if targetNode == "" {
+		count, err := cluster.Count(d.State())
+		if err != nil {
+			return SmartError(err)
+		}
+
+		if count == 1 {
+			// No targetNode was specified and we're either a single-node
+			// cluster or not clustered at all, so create the storage
+			// pool immediately.
+			err = storagePoolCreateInternal(
+				d.State(), req.Name, req.Description, req.Driver, req.Config)
+			if err != nil {
+				return InternalError(err)
+			}
+			return response
+		}
+
+		// No targetNode was specified and we're clustered. Check that
+		// the storage pool has been defined on all nodes and, if so,
+		// actually create it on all of them.
+		panic("TODO")
+	}
+
+	// A targetNode was specified, let's just define the node's storage
+	// without actually creating it. The only config key that may be
+	// set at this stage is 'source'.
+	for key := range req.Config {
+		if key != "source" {
+			return SmartError(fmt.Errorf("Invalid config key '%s'", key))
+		}
+	}
+	err = d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		return tx.StoragePoolCreatePending(targetNode, req.Name, req.Driver, req.Config)
+	})
 	if err != nil {
-		return InternalError(err)
+		return SmartError(err)
 	}
 
-	return SyncResponseLocation(true, nil, fmt.Sprintf("/%s/storage-pools/%s", version.APIVersion, req.Name))
+	return response
 }
 
 var storagePoolsCmd = Command{name: "storage-pools", get: storagePoolsGet, post: storagePoolsPost}
diff --git a/shared/api/storage_pool.go b/shared/api/storage_pool.go
index 157d2d275..ac5a2b3cb 100644
--- a/shared/api/storage_pool.go
+++ b/shared/api/storage_pool.go
@@ -19,6 +19,9 @@ type StoragePool struct {
 	Name   string   `json:"name" yaml:"name"`
 	Driver string   `json:"driver" yaml:"driver"`
 	UsedBy []string `json:"used_by" yaml:"used_by"`
+
+	// API extension: clustering
+	State string `json:"state" yaml:"state"`
 }
 
 // StoragePoolPut represents the modifiable fields of a LXD storage pool.

From 4d49b54f340ea507cf2e331b93b9df0e4bffbf91 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 5 Dec 2017 16:33:01 +0000
Subject: [PATCH 111/227] Add STATE column to lxc storage list, when LXD is
 clustered

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
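
For illustration, on a clustered daemon the new listing looks roughly
like this (hypothetical pool; exact rendering depends on tablewriter):

    +-------+-------------+--------+---------+---------+
    | NAME  | DESCRIPTION | DRIVER | STATE   | USED BY |
    +-------+-------------+--------+---------+---------+
    | pool1 |             | dir    | PENDING | 0       |
    +-------+-------------+--------+---------+---------+

Non-clustered daemons keep the previous SOURCE column.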
---
 lxc/storage.go            | 24 +++++++++++++++++++-----
 test/main.sh              |  1 +
 test/suites/clustering.sh | 37 +++++++++++++++++++++++++++++++++++++
 3 files changed, 57 insertions(+), 5 deletions(-)

diff --git a/lxc/storage.go b/lxc/storage.go
index 94a7a4b10..2e265d114 100644
--- a/lxc/storage.go
+++ b/lxc/storage.go
@@ -679,20 +679,34 @@ func (c *storageCmd) doStoragePoolsList(conf *config.Config, args []string) erro
 	data := [][]string{}
 	for _, pool := range pools {
 		usedby := strconv.Itoa(len(pool.UsedBy))
-
-		data = append(data, []string{pool.Name, pool.Description, pool.Driver, pool.Config["source"], usedby})
+		details := []string{pool.Name, pool.Description, pool.Driver}
+		if client.IsClustered() {
+			details = append(details, pool.State)
+		} else {
+			details = append(details, pool.Config["source"])
+		}
+		details = append(details, usedby)
+		data = append(data, details)
 	}
 
 	table := tablewriter.NewWriter(os.Stdout)
 	table.SetAutoWrapText(false)
 	table.SetAlignment(tablewriter.ALIGN_LEFT)
 	table.SetRowLine(true)
-	table.SetHeader([]string{
+
+	header := []string{
 		i18n.G("NAME"),
 		i18n.G("DESCRIPTION"),
 		i18n.G("DRIVER"),
-		i18n.G("SOURCE"),
-		i18n.G("USED BY")})
+	}
+	if client.IsClustered() {
+		header = append(header, i18n.G("STATE"))
+	} else {
+		header = append(header, i18n.G("SOURCE"))
+	}
+	header = append(header, i18n.G("USED BY"))
+	table.SetHeader(header)
+
 	sort.Sort(byName(data))
 	table.AppendBulk(data)
 	table.Render()
diff --git a/test/main.sh b/test/main.sh
index 80905322a..1bfe44958 100755
--- a/test/main.sh
+++ b/test/main.sh
@@ -196,6 +196,7 @@ run_test test_console "console"
 run_test test_proxy_device "proxy device"
 run_test test_clustering_membership "clustering membership"
 run_test test_clustering_containers "clustering containers"
+run_test test_clustering_storage "clustering storage"
 
 # shellcheck disable=SC2034
 TEST_RESULT=success
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 5c90dc980..3f9b3d167 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -167,3 +167,40 @@ test_clustering_containers() {
   teardown_clustering_netns
   teardown_clustering_bridge
 }
+
+test_clustering_storage() {
+  setup_clustering_bridge
+  prefix="lxd$$"
+  bridge="${prefix}"
+
+  setup_clustering_netns 1
+  LXD_ONE_DIR=$(mktemp -d -p "${TEST_DIR}" XXX)
+  chmod +x "${LXD_ONE_DIR}"
+  ns1="${prefix}1"
+  spawn_lxd_and_bootstrap_cluster "${ns1}" "${bridge}" "${LXD_ONE_DIR}"
+
+  # The state of the preseeded storage pool shows up as CREATED
+  LXD_DIR="${LXD_ONE_DIR}" lxc storage list | grep data | grep -q CREATED
+
+  # Add a newline at the end of each line. YAML has weird rules.
+  cert=$(sed ':a;N;$!ba;s/\n/\n\n/g' "${LXD_ONE_DIR}/server.crt")
+
+  # Spawn a second node
+  setup_clustering_netns 2
+  LXD_TWO_DIR=$(mktemp -d -p "${TEST_DIR}" XXX)
+  chmod +x "${LXD_TWO_DIR}"
+  ns2="${prefix}2"
+  spawn_lxd_and_join_cluster "${ns2}" "${bridge}" "${cert}" 2 1 "${LXD_TWO_DIR}"
+
+  # The state of the preseeded storage pool is still CREATED
+  LXD_DIR="${LXD_ONE_DIR}" lxc storage list | grep data | grep -q CREATED
+
+  LXD_DIR="${LXD_TWO_DIR}" lxd shutdown
+  LXD_DIR="${LXD_ONE_DIR}" lxd shutdown
+  sleep 2
+  rm -f "${LXD_TWO_DIR}/unix.socket"
+  rm -f "${LXD_ONE_DIR}/unix.socket"
+
+  teardown_clustering_netns
+  teardown_clustering_bridge
+}

From af87d9de7c7ca44b5565b72f88b1ff4a4bbc7206 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 5 Dec 2017 17:20:15 +0000
Subject: [PATCH 112/227] Add --target command line option to lxc storage
 create

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
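
Illustrative usage of the new flag (node name hypothetical):

    $ lxc storage create pool1 dir --target node1
    Storage pool pool1 pending on node node1

Without --target the behaviour and the "Storage pool pool1 created"
message are unchanged.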
---
 lxc/storage.go            | 16 ++++++++++++++--
 test/suites/clustering.sh |  8 ++++++++
 2 files changed, 22 insertions(+), 2 deletions(-)

diff --git a/lxc/storage.go b/lxc/storage.go
index 2e265d114..13435dd8d 100644
--- a/lxc/storage.go
+++ b/lxc/storage.go
@@ -24,6 +24,7 @@ import (
 type storageCmd struct {
 	resources bool
 	byteflag  bool
+	target    string
 }
 
 func (c *storageCmd) showByDefault() bool {
@@ -77,7 +78,7 @@ lxc storage show [<remote>:]<pool> [--resources]
 lxc storage info [<remote>:]<pool> [--bytes]
     Show information of a storage pool in yaml format.
 
-lxc storage create [<remote>:]<pool> <driver> [key=value]...
+lxc storage create [<remote>:]<pool> <driver> [key=value]... [--target <node>]
     Create a storage pool.
 
 lxc storage get [<remote>:]<pool> <key>
@@ -154,6 +155,7 @@ lxc storage volume show default container/data
 func (c *storageCmd) flags() {
 	gnuflag.BoolVar(&c.resources, "resources", false, i18n.G("Show the resources available to the storage pool"))
 	gnuflag.BoolVar(&c.byteflag, "bytes", false, i18n.G("Show the used and free space in bytes"))
+	gnuflag.StringVar(&c.target, "target", "", i18n.G("Node name"))
 }
 
 func (c *storageCmd) run(conf *config.Config, args []string) error {
@@ -497,13 +499,23 @@ func (c *storageCmd) doStoragePoolCreate(client lxd.ContainerServer, name string
 		pool.Config[entry[0]] = entry[1]
 	}
 
+	// If a target node was specified, the API won't actually create the
+	// pool, but only define it as pending in the database.
+	if c.target != "" {
+		client = client.ClusterTargetNode(c.target)
+	}
+
 	// Create the pool
 	err := client.CreateStoragePool(pool)
 	if err != nil {
 		return err
 	}
 
-	fmt.Printf(i18n.G("Storage pool %s created")+"\n", name)
+	if c.target != "" {
+		fmt.Printf(i18n.G("Storage pool %s pending on node %s")+"\n", name, c.target)
+	} else {
+		fmt.Printf(i18n.G("Storage pool %s created")+"\n", name)
+	}
 
 	return nil
 }
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 3f9b3d167..9a019bb45 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -195,6 +195,14 @@ test_clustering_storage() {
   # The state of the preseeded storage pool is still CREATED
   LXD_DIR="${LXD_ONE_DIR}" lxc storage list | grep data | grep -q CREATED
 
+  # Trying to pass config values other than 'source' results in an error
+  ! LXD_DIR="${LXD_ONE_DIR}" lxc storage create pool1 dir source=/foo size=123 --target node1
+
+  # Create a new storage pool
+  LXD_DIR="${LXD_ONE_DIR}" lxc storage create pool1 dir --target node1
+  LXD_DIR="${LXD_ONE_DIR}" lxc storage create pool1 dir --target node2
+  LXD_DIR="${LXD_ONE_DIR}" lxc storage show pool1 | grep state: | grep -q PENDING
+
   LXD_DIR="${LXD_TWO_DIR}" lxd shutdown
   LXD_DIR="${LXD_ONE_DIR}" lxd shutdown
   sleep 2

From 7c712dede908829a8a99caa475243ed4433c6bad Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 7 Dec 2017 09:23:15 +0000
Subject: [PATCH 113/227] Add client.ClusterNodeName() returning the name of
 the remote node

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
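
A short sketch of how a caller can use the new method (illustrative):

    package main

    import (
    	"fmt"

    	lxd "github.com/lxc/lxd/client"
    )

    // printNodeName prints the name of the cluster node behind a client.
    func printNodeName() error {
    	c, err := lxd.ConnectLXDUnix("", nil)
    	if err != nil {
    		return err
    	}
    	// GetServer caches the server environment, including node_name.
    	if _, _, err := c.GetServer(); err != nil {
    		return err
    	}
    	fmt.Println(c.ClusterNodeName())
    	return nil
    }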
---
 client/interfaces.go    |  3 ++-
 client/lxd_server.go    |  5 +++++
 lxd/api_1.0.go          | 13 ++++++++++++-
 lxd/api_cluster_test.go |  1 +
 lxd/db/node.go          | 17 +++++++++++++++++
 lxd/db/node_test.go     | 11 +++++++++++
 shared/api/server.go    |  3 ++-
 7 files changed, 50 insertions(+), 3 deletions(-)

diff --git a/client/interfaces.go b/client/interfaces.go
index 5e33e0531..fa40cdce4 100644
--- a/client/interfaces.go
+++ b/client/interfaces.go
@@ -50,7 +50,8 @@ type ContainerServer interface {
 	HasExtension(extension string) (exists bool)
 	RequireAuthenticated(authenticated bool)
 	IsClustered() (clustered bool)
-	ClusterTargetNode(name string) ContainerServer
+	ClusterTargetNode(name string) (client ContainerServer)
+	ClusterNodeName() (name string)
 
 	// Certificate functions
 	GetCertificateFingerprints() (fingerprints []string, err error)
diff --git a/client/lxd_server.go b/client/lxd_server.go
index ec5f09857..7fddae220 100644
--- a/client/lxd_server.go
+++ b/client/lxd_server.go
@@ -105,3 +105,8 @@ func (r *ProtocolLXD) ClusterTargetNode(name string) ContainerServer {
 		targetNode:           name,
 	}
 }
+
+// ClusterNodeName returns the name of the node this client is pointing to.
+func (r *ProtocolLXD) ClusterNodeName() string {
+	return r.server.Environment.NodeName
+}
diff --git a/lxd/api_1.0.go b/lxd/api_1.0.go
index 5926e6d40..1f259d379 100644
--- a/lxd/api_1.0.go
+++ b/lxd/api_1.0.go
@@ -122,6 +122,15 @@ func api10Get(d *Daemon, r *http.Request) Response {
 		return SmartError(err)
 	}
 
+	nodeName := ""
+	err = d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		nodeName, err = tx.NodeName()
+		return err
+	})
+	if err != nil {
+		return SmartError(err)
+	}
+
 	certificate := string(d.endpoints.NetworkPublicKey())
 	var certificateFingerprint string
 	if certificate != "" {
@@ -154,7 +163,9 @@ func api10Get(d *Daemon, r *http.Request) Response {
 		Server:                 "lxd",
 		ServerPid:              os.Getpid(),
 		ServerVersion:          version.Version,
-		Clustered:              clustered}
+		Clustered:              clustered,
+		NodeName:               nodeName,
+	}
 
 	drivers := readStoragePoolDriversCache()
 	for driver, version := range drivers {
diff --git a/lxd/api_cluster_test.go b/lxd/api_cluster_test.go
index e4f187478..315f13b7c 100644
--- a/lxd/api_cluster_test.go
+++ b/lxd/api_cluster_test.go
@@ -30,6 +30,7 @@ func TestCluster_Bootstrap(t *testing.T) {
 	_, _, err = client.GetServer()
 	require.NoError(t, err)
 	assert.True(t, client.IsClustered())
+	assert.Equal(t, "buzz", client.ClusterNodeName())
 }
 
 // A LXD node which is already configured for networking can join an existing
diff --git a/lxd/db/node.go b/lxd/db/node.go
index 172d79448..0fac94c64 100644
--- a/lxd/db/node.go
+++ b/lxd/db/node.go
@@ -61,6 +61,23 @@ func (c *ClusterTx) NodeByName(name string) (NodeInfo, error) {
 	}
 }
 
+// NodeName returns the name of the node this method is invoked on.
+func (c *ClusterTx) NodeName() (string, error) {
+	stmt := "SELECT name FROM nodes WHERE id=?"
+	names, err := query.SelectStrings(c.tx, stmt, c.nodeID)
+	if err != nil {
+		return "", err
+	}
+	switch len(names) {
+	case 0:
+		return "", nil
+	case 1:
+		return names[0], nil
+	default:
+		return "", fmt.Errorf("inconsistency: non-unique node ID")
+	}
+}
+
 // Nodes returns all LXD nodes part of the cluster.
 //
 // If this LXD instance is not clustered, a list with a single node whose
diff --git a/lxd/db/node_test.go b/lxd/db/node_test.go
index d95363746..439240b14 100644
--- a/lxd/db/node_test.go
+++ b/lxd/db/node_test.go
@@ -53,6 +53,17 @@ func TestNodesCount(t *testing.T) {
 	assert.Equal(t, 2, count)
 }
 
+func TestNodeName(t *testing.T) {
+	tx, cleanup := db.NewTestClusterTx(t)
+	defer cleanup()
+
+	name, err := tx.NodeName()
+	require.NoError(t, err)
+
+	// The default node (ID 1) has the conventional name 'none'.
+	assert.Equal(t, "none", name)
+}
+
 // Rename a node
 func TestNodeRename(t *testing.T) {
 	tx, cleanup := db.NewTestClusterTx(t)
diff --git a/shared/api/server.go b/shared/api/server.go
index 570041533..b0989571b 100644
--- a/shared/api/server.go
+++ b/shared/api/server.go
@@ -18,7 +18,8 @@ type ServerEnvironment struct {
 	StorageVersion         string   `json:"storage_version" yaml:"storage_version"`
 
 	// API extension: clustering
-	Clustered bool `json:"clustered" yaml:"clustered"`
+	Clustered bool   `json:"clustered" yaml:"clustered"`
+	NodeName  string `json:"node_name" yaml:"node_name"`
 }
 
 // ServerPut represents the modifiable fields of a LXD server configuration

From c8b1dc9967adc541ffd2da49c0505ed194d61492 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 7 Dec 2017 11:17:27 +0000
Subject: [PATCH 114/227] Create a storage pool across all nodes of a cluster

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
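
The resulting two-phase workflow, sketched from the client's point of
view (node names are illustrative):

    package main

    import (
    	lxd "github.com/lxc/lxd/client"
    	"github.com/lxc/lxd/shared/api"
    )

    // createClusterPool first defines the pool on every node (PENDING),
    // then issues a final untargeted request that creates it everywhere.
    func createClusterPool(nodes []string, name string) error {
    	c, err := lxd.ConnectLXDUnix("", nil)
    	if err != nil {
    		return err
    	}
    	for _, node := range nodes {
    		post := api.StoragePoolsPost{Name: name, Driver: "dir"}
    		if err := c.ClusterTargetNode(node).CreateStoragePool(post); err != nil {
    			return err
    		}
    	}
    	// No target: the pool is now created on all nodes and moves to CREATED.
    	return c.CreateStoragePool(api.StoragePoolsPost{Name: name, Driver: "dir"})
    }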
---
 lxd/api_storage_pools_test.go |  38 ++++++++++++++
 lxd/storage_pools.go          | 112 +++++++++++++++++++++++++++++++++++++++---
 lxd/storage_pools_utils.go    |   7 +++
 test/suites/clustering.sh     |   9 +++-
 4 files changed, 157 insertions(+), 9 deletions(-)

diff --git a/lxd/api_storage_pools_test.go b/lxd/api_storage_pools_test.go
index 8c572f487..c4cf3adbd 100644
--- a/lxd/api_storage_pools_test.go
+++ b/lxd/api_storage_pools_test.go
@@ -38,3 +38,41 @@ func TestStoragePoolsCreate_TargetNode(t *testing.T) {
 
 	assert.Equal(t, "PENDING", pool.State)
 }
+
+// An error is returned when trying to create a new storage pool in a cluster
+// where the pool was not defined on all nodes.
+func TestStoragePoolsCreate_MissingNodes(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping storage-pools missing-nodes test in short mode.")
+	}
+	daemons, cleanup := newDaemons(t, 2)
+	defer cleanup()
+
+	f := clusterFixture{t: t}
+	f.FormCluster(daemons)
+
+	// Define the pool on rusp-0.
+	daemon := daemons[0]
+	client := f.ClientUnix(daemon).ClusterTargetNode("rusp-0")
+
+	poolPost := api.StoragePoolsPost{
+		Name:   "mypool",
+		Driver: "dir",
+	}
+	poolPost.Config = map[string]string{
+		"source": "",
+	}
+
+	err := client.CreateStoragePool(poolPost)
+	require.NoError(t, err)
+
+	// Trying to create the pool now results in an error, since it's not
+	// defined on all nodes.
+	poolPost = api.StoragePoolsPost{
+		Name:   "mypool",
+		Driver: "dir",
+	}
+	client = f.ClientUnix(daemon)
+	err = client.CreateStoragePool(poolPost)
+	require.EqualError(t, err, "Pool not defined on nodes: buzz")
+}
diff --git a/lxd/storage_pools.go b/lxd/storage_pools.go
index a113b227e..bab4ef6aa 100644
--- a/lxd/storage_pools.go
+++ b/lxd/storage_pools.go
@@ -9,6 +9,7 @@ import (
 	"sync"
 
 	"github.com/gorilla/mux"
+	lxd "github.com/lxc/lxd/client"
 	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/util"
@@ -92,6 +93,18 @@ func storagePoolsPost(d *Daemon, r *http.Request) Response {
 	url := fmt.Sprintf("/%s/storage-pools/%s", version.APIVersion, req.Name)
 	response := SyncResponseLocation(true, nil, url)
 
+	if isClusterNotification(r) {
+		// This is an internal request which triggers the actual
+		// creation of the pool on this node, now that it has been
+		// defined on all nodes.
+		err = doStoragePoolCreateInternal(
+			d.State(), req.Name, req.Description, req.Driver, req.Config)
+		if err != nil {
+			return SmartError(err)
+		}
+		return response
+	}
+
 	targetNode := r.FormValue("targetNode")
 	if targetNode == "" {
 		count, err := cluster.Count(d.State())
@@ -105,16 +118,16 @@ func storagePoolsPost(d *Daemon, r *http.Request) Response {
 			// pool immediately.
 			err = storagePoolCreateInternal(
 				d.State(), req.Name, req.Description, req.Driver, req.Config)
-			if err != nil {
-				return InternalError(err)
-			}
-			return response
+		} else {
+			// No targetNode was specified and we're clustered, so finalize the
+			// config in the db and actually create the pool on all nodes.
+			err = storagePoolsPostCluster(d, req)
+		}
+		if err != nil {
+			return InternalError(err)
 		}
+		return response
 
-		// No targetNode was specified and we're clustered. Check that
-		// the storage pool has been defined on all nodes and, if so,
-		// actually create it on all of them.
-		panic("TODO")
 	}
 
 	// A targetNode was specified, let's just define the node's storage
@@ -135,6 +148,89 @@ func storagePoolsPost(d *Daemon, r *http.Request) Response {
 	return response
 }
 
+func storagePoolsPostCluster(d *Daemon, req api.StoragePoolsPost) error {
+	// Check that no 'source' config key has been defined, since
+	// that's node-specific.
+	for key := range req.Config {
+		if key == "source" {
+			return fmt.Errorf("Config key 'source' is node-specific")
+		}
+	}
+
+	// Check that the pool is properly defined, fetch the node-specific
+	// configs and insert the global config.
+	var configs map[string]map[string]string
+	var nodeName string
+	err := d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		// Check that the pool was defined at all.
+		poolID, err := tx.StoragePoolID(req.Name)
+		if err != nil {
+			return err
+		}
+
+		// Fetch the node-specific configs.
+		configs, err = tx.StoragePoolNodeConfigs(poolID)
+		if err != nil {
+			return err
+		}
+
+		// Take note of the name of this node
+		nodeName, err = tx.NodeName()
+		if err != nil {
+			return err
+		}
+
+		// Insert the global config keys.
+		return tx.StoragePoolConfigAdd(poolID, 0, req.Config)
+	})
+	if err != nil {
+		return err
+	}
+
+	// Create the pool on this node, merging in its node-specific config.
+	nodeReq := req
+	for key, value := range configs[nodeName] {
+		nodeReq.Config[key] = value
+	}
+	err = doStoragePoolCreateInternal(
+		d.State(), req.Name, req.Description, req.Driver, nodeReq.Config)
+	if err != nil {
+		return err
+	}
+
+	// Notify all other nodes to create the pool.
+	notifier, err := cluster.NewNotifier(d.State(), d.endpoints.NetworkCert(), cluster.NotifyAll)
+	if err != nil {
+		return err
+	}
+	notifyErr := notifier(func(client lxd.ContainerServer) error {
+		_, _, err := client.GetServer()
+		if err != nil {
+			return err
+		}
+		nodeReq := req
+		for key, value := range configs[client.ClusterNodeName()] {
+			nodeReq.Config[key] = value
+		}
+		return client.CreateStoragePool(nodeReq)
+	})
+
+	errored := notifyErr != nil
+
+	// Finally update the storage pool state.
+	err = d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		if errored {
+			return tx.StoragePoolErrored(req.Name)
+		}
+		return tx.StoragePoolCreated(req.Name)
+	})
+	if err != nil {
+		return err
+	}
+
+	return notifyErr
+}
+
 var storagePoolsCmd = Command{name: "storage-pools", get: storagePoolsGet, post: storagePoolsPost}
 
 // /1.0/storage-pools/{name}
diff --git a/lxd/storage_pools_utils.go b/lxd/storage_pools_utils.go
index 5d2c044ac..7e5a80492 100644
--- a/lxd/storage_pools_utils.go
+++ b/lxd/storage_pools_utils.go
@@ -211,7 +211,14 @@ func storagePoolCreateInternal(state *state.State, poolName, poolDescription str
 		}
 		dbStoragePoolDeleteAndUpdateCache(state.Cluster, poolName)
 	}()
+	err = doStoragePoolCreateInternal(state, poolName, poolDescription, driver, config)
+	tryUndo = err != nil
+	return err
+}
 
+// This performs all non-db related work needed to create the pool.
+func doStoragePoolCreateInternal(state *state.State, poolName, poolDescription string, driver string, config map[string]string) error {
+	tryUndo := true
 	s, err := storagePoolInit(state, poolName)
 	if err != nil {
 		return err
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 9a019bb45..08158711c 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -198,11 +198,18 @@ test_clustering_storage() {
   # Trying to pass config values other than 'source' results in an error
   ! LXD_DIR="${LXD_ONE_DIR}" lxc storage create pool1 dir source=/foo size=123 --target node1
 
-  # Create a new storage pool
+  # Define storage pools on the two nodes
   LXD_DIR="${LXD_ONE_DIR}" lxc storage create pool1 dir --target node1
   LXD_DIR="${LXD_ONE_DIR}" lxc storage create pool1 dir --target node2
   LXD_DIR="${LXD_ONE_DIR}" lxc storage show pool1 | grep state: | grep -q PENDING
 
+  # The source config key is not legal for the final pool creation
+  ! LXD_DIR="${LXD_ONE_DIR}" lxc storage create pool1 dir source=/foo
+
+  # Create the storage pool
+  LXD_DIR="${LXD_TWO_DIR}" lxc storage create pool1 dir
+  LXD_DIR="${LXD_ONE_DIR}" lxc storage show pool1 | grep state: | grep -q CREATED
+
   LXD_DIR="${LXD_TWO_DIR}" lxd shutdown
   LXD_DIR="${LXD_ONE_DIR}" lxd shutdown
   sleep 2

From 6a718c29e450f8d06d918e229782ff326ca1761d Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 7 Dec 2017 13:29:14 +0000
Subject: [PATCH 115/227] Compare global pools/networks configs when a node
 requests to join

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
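
The per-pool check boils down to the following comparison (sketch; the
actual code also verifies the driver and handles missing pools):

    package main

    import "github.com/lxc/lxd/lxd/util"

    // poolsMatch strips the node-specific "source" key from both configs
    // and then compares them key by key. Note that it mutates its arguments.
    func poolsMatch(clusterConfig, joiningConfig map[string]string) bool {
    	delete(clusterConfig, "source")
    	delete(joiningConfig, "source")
    	return util.CompareConfigs(clusterConfig, joiningConfig)
    }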
---
 client/interfaces.go      |   2 +-
 client/lxd_cluster.go     |   4 +-
 lxd/api_cluster.go        | 108 +++++++++++++++++++++++++++++++++++++++++++++-
 lxd/cluster/membership.go |  14 +++---
 lxd/db/migration.go       |  10 +++++
 lxd/db/networks.go        |  10 ++++-
 lxd/main_init.go          |  17 +++-----
 lxd/main_init_test.go     |   7 +--
 lxd/util/config.go        |  16 +++++++
 shared/api/cluster.go     |  18 ++++----
 10 files changed, 171 insertions(+), 35 deletions(-)
 create mode 100644 lxd/util/config.go

diff --git a/client/interfaces.go b/client/interfaces.go
index fa40cdce4..ee44d282f 100644
--- a/client/interfaces.go
+++ b/client/interfaces.go
@@ -168,7 +168,7 @@ type ContainerServer interface {
 	// Cluster functions ("cluster" API extensions)
 	GetCluster(password string) (cluster *api.Cluster, err error)
 	BootstrapCluster(name string) (op *Operation, err error)
-	AcceptNode(targetPassword, name, address string, schema, api int) (info *api.ClusterNodeAccepted, err error)
+	AcceptNode(targetPassword, name, address string, schema, api int, pools []api.StoragePool, networks []api.Network) (info *api.ClusterNodeAccepted, err error)
 	JoinCluster(targetAddress, targetPassword, targetCert, name string) (op *Operation, err error)
 	LeaveCluster(name string, force bool) (err error)
 	GetNodes() (nodes []api.Node, err error)
diff --git a/client/lxd_cluster.go b/client/lxd_cluster.go
index 93e9a6d6b..50ce1e3da 100644
--- a/client/lxd_cluster.go
+++ b/client/lxd_cluster.go
@@ -36,13 +36,15 @@ func (r *ProtocolLXD) BootstrapCluster(name string) (*Operation, error) {
 }
 
 // AcceptNode requests to accept a new node into the cluster.
-func (r *ProtocolLXD) AcceptNode(targetPassword, name, address string, schema, apiExt int) (*api.ClusterNodeAccepted, error) {
+func (r *ProtocolLXD) AcceptNode(targetPassword, name, address string, schema, apiExt int, pools []api.StoragePool, networks []api.Network) (*api.ClusterNodeAccepted, error) {
 	cluster := api.ClusterPost{
 		Name:           name,
 		Address:        address,
 		Schema:         schema,
 		API:            apiExt,
 		TargetPassword: targetPassword,
+		StoragePools:   pools,
+		Networks:       networks,
 	}
 	info := &api.ClusterNodeAccepted{}
 	_, err := r.queryStruct("POST", "/cluster/nodes", cluster, "", &info)
diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index 1b1ca19ce..42a82d02f 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -195,6 +195,18 @@ func clusterNodesPostAccept(d *Daemon, req api.ClusterPost) Response {
 	if util.PasswordCheck(secret, req.TargetPassword) != nil {
 		return Forbidden
 	}
+
+	// Check that the pools and networks provided by the joining node have
+	// configs that match the cluster ones.
+	err = clusterCheckStoragePoolsMatch(d.cluster, req.StoragePools)
+	if err != nil {
+		return SmartError(err)
+	}
+	err = clusterCheckNetworksMatch(d.cluster, req.Networks)
+	if err != nil {
+		return SmartError(err)
+	}
+
 	nodes, err := cluster.Accept(d.State(), d.gateway, req.Name, req.Address, req.Schema, req.API)
 	if err != nil {
 		return BadRequest(err)
@@ -210,6 +222,71 @@ func clusterNodesPostAccept(d *Daemon, req api.ClusterPost) Response {
 	return SyncResponse(true, accepted)
 }
 
+func clusterCheckStoragePoolsMatch(cluster *db.Cluster, reqPools []api.StoragePool) error {
+	poolNames, err := cluster.StoragePools()
+	if err != nil && err != db.NoSuchObjectError {
+		return err
+	}
+	for _, name := range poolNames {
+		found := false
+		for _, reqPool := range reqPools {
+			if reqPool.Name != name {
+				continue
+			}
+			found = true
+			_, pool, err := cluster.StoragePoolGet(name)
+			if err != nil {
+				return err
+			}
+			if pool.Driver != reqPool.Driver {
+				return fmt.Errorf("Mismatching driver for storage pool %s", name)
+			}
+			// Exclude the "source" key, which is node-specific.
+			delete(pool.Config, "source")
+			delete(reqPool.Config, "source")
+			if !util.CompareConfigs(pool.Config, reqPool.Config) {
+				return fmt.Errorf("Mismatching config for storage pool %s", name)
+			}
+			break
+		}
+		if !found {
+			return fmt.Errorf("Missing storage pool %s", name)
+		}
+	}
+	return nil
+}
+
+func clusterCheckNetworksMatch(cluster *db.Cluster, reqNetworks []api.Network) error {
+	networkNames, err := cluster.Networks()
+	if err != nil && err != db.NoSuchObjectError {
+		return err
+	}
+	for _, name := range networkNames {
+		found := false
+		for _, reqNetwork := range reqNetworks {
+			if reqNetwork.Name != name {
+				continue
+			}
+			found = true
+			_, network, err := cluster.NetworkGet(name)
+			if err != nil {
+				return err
+			}
+			// Exclude the "bridge.external_interfaces" key, which is node-specific.
+			delete(network.Config, "bridge.external_interfaces")
+			delete(reqNetwork.Config, "bridge.external_interfaces")
+			if !util.CompareConfigs(network.Config, reqNetwork.Config) {
+				return fmt.Errorf("Mismatching config for network %s", name)
+			}
+			break
+		}
+		if !found {
+			return fmt.Errorf("Missing network %s", name)
+		}
+	}
+	return nil
+}
+
 func clusterNodesPostJoin(d *Daemon, req api.ClusterPost) Response {
 	// Make sure basic pre-conditions are met.
 	if len(req.TargetCert) == 0 {
@@ -217,12 +294,39 @@ func clusterNodesPostJoin(d *Daemon, req api.ClusterPost) Response {
 	}
 	address, err := node.HTTPSAddress(d.db)
 	if err != nil {
-		return InternalError(err)
+		return SmartError(err)
 	}
 	if address == "" {
 		return BadRequest(fmt.Errorf("No core.https_address config key is set on this node"))
 	}
 
+	// Get all defined storage pools and networks, so they can be compared
+	// to the ones in the cluster.
+	pools := []api.StoragePool{}
+	poolNames, err := d.cluster.StoragePools()
+	if err != nil && err != db.NoSuchObjectError {
+		return SmartError(err)
+	}
+	for _, name := range poolNames {
+		_, pool, err := d.cluster.StoragePoolGet(name)
+		if err != nil {
+			return SmartError(err)
+		}
+		pools = append(pools, *pool)
+	}
+	networks := []api.Network{}
+	networkNames, err := d.cluster.Networks()
+	if err != nil && err != db.NoSuchObjectError {
+		return SmartError(err)
+	}
+	for _, name := range networkNames {
+		_, network, err := d.cluster.NetworkGet(name)
+		if err != nil {
+			return SmartError(err)
+		}
+		networks = append(networks, *network)
+	}
+
 	// Client parameters to connect to the target cluster node.
 	args := &lxd.ConnectionArgs{
 		TLSServerCert: string(req.TargetCert),
@@ -239,7 +343,7 @@ func clusterNodesPostJoin(d *Daemon, req api.ClusterPost) Response {
 		}
 		info, err := client.AcceptNode(
 			req.TargetPassword, req.Name, address, cluster.SchemaVersion,
-			len(version.APIExtensions))
+			len(version.APIExtensions), pools, networks)
 		if err != nil {
 			return errors.Wrap(err, "failed to request to add node")
 		}
diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index 447d189a4..8066db3b3 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -227,11 +227,11 @@ func Join(state *state.State, gateway *Gateway, cert *shared.CertInfo, name stri
 		return err
 	}
 
-	// Get the local config keys for the cluster networks. It assumes that
-	// the local storage pools and networks match the cluster networks, if
-	// not an error will be returned. Also get any outstanding operation,
-	// typically there will be just one, created by the POST /cluster/nodes
-	// request which triggered this code.
+	// Get the local config keys for the cluster pools and networks. It
+	// assumes that the local storage pools and networks match the cluster
+	// ones; if not, an error will be returned. Also get any outstanding
+	// operation; typically there will be just one, created by the POST
+	// /cluster/nodes request which triggered this code.
 	var pools map[string]map[string]string
 	var networks map[string]map[string]string
 	var operations []string
@@ -345,6 +345,10 @@ func Join(state *state.State, gateway *Gateway, cert *shared.CertInfo, name stri
 			if err != nil {
 				return errors.Wrap(err, "failed to add joining node to the network")
 			}
+			// We only need to add the bridge.external_interfaces
+			// key, since the other keys are global and are already
+			// there.
+			config = map[string]string{"bridge.external_interfaces": config["bridge.external_interfaces"]}
 			err = tx.NetworkConfigAdd(id, node.ID, config)
 			if err != nil {
 				return errors.Wrap(err, "failed to add joining node's network config")
diff --git a/lxd/db/migration.go b/lxd/db/migration.go
index 4a424bf7e..4be14d2b0 100644
--- a/lxd/db/migration.go
+++ b/lxd/db/migration.go
@@ -98,6 +98,16 @@ func (c *Cluster) ImportPreClusteringData(dump *Dump) error {
 			case "containers":
 				fallthrough
 			case "networks_config":
+				// The "bridge.external_interfaces" config key
+				// is the only one which is not global to the
+				// cluster, so all other keys will have a NULL
+				// node_id.
+				for i, column := range columns {
+					if column == "key" && row[i] != "bridge.external_interfaces" {
+						nullNodeID = true
+						break
+					}
+				}
 				appendNodeID()
 			case "storage_pools_config":
 				// The "source" config key is the only one
diff --git a/lxd/db/networks.go b/lxd/db/networks.go
index e08db3736..9115e7e46 100644
--- a/lxd/db/networks.go
+++ b/lxd/db/networks.go
@@ -168,7 +168,7 @@ func (c *Cluster) NetworkConfigGet(id int64) (map[string]string, error) {
             key, value
         FROM networks_config
 		WHERE network_id=?
-                AND node_id=?`
+                AND (node_id=? OR node_id IS NULL)`
 	inargs := []interface{}{id, c.nodeID}
 	outfmt := []interface{}{key, value}
 	results, err := queryScan(c.db, query, inargs, outfmt)
@@ -295,8 +295,14 @@ func networkConfigAdd(tx *sql.Tx, networkID, nodeID int64, config map[string]str
 		if v == "" {
 			continue
 		}
+		var nodeIDValue interface{}
+		if k != "bridge.external_interfaces" {
+			nodeIDValue = nil
+		} else {
+			nodeIDValue = nodeID
+		}
 
-		_, err = stmt.Exec(networkID, nodeID, k, v)
+		_, err = stmt.Exec(networkID, nodeIDValue, k, v)
 		if err != nil {
 			return err
 		}
diff --git a/lxd/main_init.go b/lxd/main_init.go
index 55e77e1da..6f0a0cbb0 100644
--- a/lxd/main_init.go
+++ b/lxd/main_init.go
@@ -6,7 +6,6 @@ import (
 	"net"
 	"os"
 	"os/exec"
-	"sort"
 	"strconv"
 	"strings"
 	"syscall"
@@ -870,17 +869,13 @@ func (cmd *CmdInit) askClusteringNetworks(cluster *api.Cluster) ([]api.NetworksP
 		post.Config = network.Config
 		post.Type = network.Type
 		post.Managed = true
+		// The only config key to ask for is 'bridge.external_interfaces',
+		// which is the only node-specific one.
+		key := "bridge.external_interfaces"
 		// Sort config keys to get a stable ordering (especially for tests)
-		keys := []string{}
-		for key := range post.Config {
-			keys = append(keys, key)
-		}
-		sort.Strings(keys)
-		for _, key := range keys {
-			question := fmt.Sprintf(
-				`Enter local value for key "%s" of network "%s": `, key, post.Name)
-			post.Config[key] = cmd.Context.AskString(question, "", nil)
-		}
+		question := fmt.Sprintf(
+			`Enter local value for key "%s" of network "%s": `, key, post.Name)
+		post.Config[key] = cmd.Context.AskString(question, "", nil)
 		networks[i] = post
 	}
 	return networks, nil
diff --git a/lxd/main_init_test.go b/lxd/main_init_test.go
index 873521f38..f364297df 100644
--- a/lxd/main_init_test.go
+++ b/lxd/main_init_test.go
@@ -195,11 +195,8 @@ func (suite *cmdInitTestSuite) TestCmdInit_InteractiveClusteringJoin() {
 		ClusterAcceptFingerprint: true,
 		ClusterConfirmLosingData: true,
 		ClusterConfig: []string{
-			"",               // storage source
-			"10.23.189.2/24", // ipv4.address
-			"true",           // ipv4.nat
-			"aaaa:bbbb:cccc:dddd::1/64", // ipv6.address
-			"true", // ipv6.nat
+			"", // storage source
+			"", // bridge.external_interfaces
 		},
 	}
 	answers.Render(suite.streams)
diff --git a/lxd/util/config.go b/lxd/util/config.go
new file mode 100644
index 000000000..782dca01e
--- /dev/null
+++ b/lxd/util/config.go
@@ -0,0 +1,16 @@
+package util
+
+// CompareConfigs compares two config maps and returns true if they are equal.
+func CompareConfigs(config1, config2 map[string]string) bool {
+	for key, value := range config1 {
+		if config2[key] != value {
+			return false
+		}
+	}
+	for key, value := range config2 {
+		if config1[key] != value {
+			return false
+		}
+	}
+	return true
+}
diff --git a/shared/api/cluster.go b/shared/api/cluster.go
index 1320a1ff4..e68ce5551 100644
--- a/shared/api/cluster.go
+++ b/shared/api/cluster.go
@@ -11,14 +11,16 @@ type Cluster struct {
 //
 // API extension: cluster
 type ClusterPost struct {
-	Name           string `json:"name" yaml:"name"`
-	Address        string `json:"address" yaml:"address"`
-	Schema         int    `json:"schema" yaml:"schema"`
-	API            int    `json:"api" yaml:"api"`
-	TargetAddress  string `json:"target_address" yaml:"target_address"`
-	TargetCert     string `json:"target_cert" yaml:"target_cert"`
-	TargetCA       []byte `json:"target_ca" yaml:"target_ca"`
-	TargetPassword string `json:"target_password" yaml:"target_password"`
+	Name           string        `json:"name" yaml:"name"`
+	Address        string        `json:"address" yaml:"address"`
+	Schema         int           `json:"schema" yaml:"schema"`
+	API            int           `json:"api" yaml:"api"`
+	TargetAddress  string        `json:"target_address" yaml:"target_address"`
+	TargetCert     string        `json:"target_cert" yaml:"target_cert"`
+	TargetCA       []byte        `json:"target_ca" yaml:"target_ca"`
+	TargetPassword string        `json:"target_password" yaml:"target_password"`
+	StoragePools   []StoragePool `json:"storage_pools" yaml:"storage_pools"`
+	Networks       []Network     `json:"networks" yaml:"networks"`
 }
 
 // ClusterNodeAccepted represents the response of a request to join a cluster.

From 1dfff87f7224b5f8c72a7fcab6b53a7147c69f25 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 11 Dec 2017 11:48:02 +0000
Subject: [PATCH 116/227] Show the 'source' pool config key only when targeting
 a node

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
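
Sketch of fetching the node-specific view, which is the only one that
still includes "source" (names are illustrative):

    package main

    import lxd "github.com/lxc/lxd/client"

    // poolSource returns the "source" config key of a pool as seen by one node.
    func poolSource(node, pool string) (string, error) {
    	c, err := lxd.ConnectLXDUnix("", nil)
    	if err != nil {
    		return "", err
    	}
    	p, _, err := c.ClusterTargetNode(node).GetStoragePool(pool)
    	if err != nil {
    		return "", err
    	}
    	return p.Config["source"], nil
    }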
---
 client/lxd_storage_pools.go    |  6 +++++-
 doc/api-extensions.md          |  1 +
 lxc/storage.go                 |  7 ++++++-
 lxd/api_1.0.go                 | 10 +---------
 lxd/cluster/membership.go      | 15 +++++++++++++++
 lxd/cluster/membership_test.go |  4 ++++
 lxd/cluster/resolve.go         | 25 +++++++++++++++++++++++++
 lxd/containers_post.go         | 12 +-----------
 lxd/storage_pools.go           | 32 ++++++++++++++++++++++++++++++++
 test/suites/clustering.sh      |  7 +++++++
 10 files changed, 97 insertions(+), 22 deletions(-)
 create mode 100644 lxd/cluster/resolve.go

diff --git a/client/lxd_storage_pools.go b/client/lxd_storage_pools.go
index 717c0f22d..b4a2a326b 100644
--- a/client/lxd_storage_pools.go
+++ b/client/lxd_storage_pools.go
@@ -52,7 +52,11 @@ func (r *ProtocolLXD) GetStoragePool(name string) (*api.StoragePool, string, err
 	pool := api.StoragePool{}
 
 	// Fetch the raw value
-	etag, err := r.queryStruct("GET", fmt.Sprintf("/storage-pools/%s", url.QueryEscape(name)), nil, "", &pool)
+	path := fmt.Sprintf("/storage-pools/%s", url.QueryEscape(name))
+	if r.targetNode != "" {
+		path += fmt.Sprintf("?targetNode=%s", r.targetNode)
+	}
+	etag, err := r.queryStruct("GET", path, nil, "", &pool)
 	if err != nil {
 		return nil, "", err
 	}
diff --git a/doc/api-extensions.md b/doc/api-extensions.md
index 26c62e6b2..9283aab10 100644
--- a/doc/api-extensions.md
+++ b/doc/api-extensions.md
@@ -414,3 +414,4 @@ The following existing endpoints have been modified:
 
  * `POST /1.0/containers` accepts a new targetNode query parameter
  * `POST /1.0/storage-pools` accepts a new targetNode query parameter
+ * `GET /1.0/storage-pools/<name>` accepts a new targetNode query parameter
diff --git a/lxc/storage.go b/lxc/storage.go
index 13435dd8d..101f53df9 100644
--- a/lxc/storage.go
+++ b/lxc/storage.go
@@ -72,7 +72,7 @@ Manage storage pools and volumes.
 lxc storage list [<remote>:]
     List available storage pools.
 
-lxc storage show [<remote>:]<pool> [--resources]
+lxc storage show [<remote>:]<pool> [--resources] [--target <node>]
     Show details of a storage pool.
 
 lxc storage info [<remote>:]<pool> [--bytes]
@@ -769,6 +769,11 @@ func (c *storageCmd) doStoragePoolShow(client lxd.ContainerServer, name string)
 		return errArgs
 	}
 
+	// If a target node was specified, we also return node-specific config values.
+	if c.target != "" {
+		client = client.ClusterTargetNode(c.target)
+	}
+
 	if c.resources {
 		res, err := client.GetStoragePoolResources(name)
 		if err != nil {
diff --git a/lxd/api_1.0.go b/lxd/api_1.0.go
index 1f259d379..8c1654db9 100644
--- a/lxd/api_1.0.go
+++ b/lxd/api_1.0.go
@@ -109,15 +109,7 @@ func api10Get(d *Daemon, r *http.Request) Response {
 		return InternalError(err)
 	}
 
-	clustered := false
-	err = d.db.Transaction(func(tx *db.NodeTx) error {
-		addresses, err := tx.RaftNodeAddresses()
-		if err != nil {
-			return err
-		}
-		clustered = len(addresses) > 0
-		return nil
-	})
+	clustered, err := cluster.Enabled(d.db)
 	if err != nil {
 		return SmartError(err)
 	}
diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index 8066db3b3..e55ba85ba 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -502,6 +502,21 @@ func Count(state *state.State) (int, error) {
 	return count, err
 }
 
+// Enabled is a convenience function that returns true if clustering is
+// enabled on this node.
+func Enabled(node *db.Node) (bool, error) {
+	enabled := false
+	err := node.Transaction(func(tx *db.NodeTx) error {
+		addresses, err := tx.RaftNodeAddresses()
+		if err != nil {
+			return err
+		}
+		enabled = len(addresses) > 0
+		return nil
+	})
+	return enabled, err
+}
+
 // Check that node-related preconditions are met for bootstrapping or joining a
 // cluster.
 func membershipCheckNodeStateForBootstrapOrJoin(tx *db.NodeTx, address string) error {
diff --git a/lxd/cluster/membership_test.go b/lxd/cluster/membership_test.go
index f38f43c65..d1d86f4eb 100644
--- a/lxd/cluster/membership_test.go
+++ b/lxd/cluster/membership_test.go
@@ -131,6 +131,10 @@ func TestBootstrap(t *testing.T) {
 	count, err := cluster.Count(state)
 	require.NoError(t, err)
 	assert.Equal(t, 1, count)
+
+	enabled, err := cluster.Enabled(state.Node)
+	require.NoError(t, err)
+	assert.True(t, enabled)
 }
 
 // If pre-conditions are not met, a descriptive error is returned.
diff --git a/lxd/cluster/resolve.go b/lxd/cluster/resolve.go
new file mode 100644
index 000000000..6aece834b
--- /dev/null
+++ b/lxd/cluster/resolve.go
@@ -0,0 +1,25 @@
+package cluster
+
+import "github.com/lxc/lxd/lxd/db"
+
+// ResolveTarget is a convenience for handling the value of the ?targetNode query
+// parameter. It returns the address of the given node, or the empty string if
+// the given node is the local one.
+func ResolveTarget(cluster *db.Cluster, target string) (string, error) {
+	address := ""
+	err := cluster.Transaction(func(tx *db.ClusterTx) error {
+		name, err := tx.NodeName()
+		if err != nil {
+			return err
+		}
+		node, err := tx.NodeByName(target)
+		if err != nil {
+			return err
+		}
+		if node.Name != name {
+			address = node.Address
+		}
+		return nil
+	})
+	return address, err
+}
diff --git a/lxd/containers_post.go b/lxd/containers_post.go
index 2541edbf6..6657cd9a2 100644
--- a/lxd/containers_post.go
+++ b/lxd/containers_post.go
@@ -525,17 +525,7 @@ func containersPost(d *Daemon, r *http.Request) Response {
 
 	targetNode := r.FormValue("targetNode")
 	if targetNode != "" {
-		address := ""
-		err := d.cluster.Transaction(func(tx *db.ClusterTx) error {
-			node, err := tx.NodeByName(targetNode)
-			if err != nil {
-				return err
-			}
-			if node.Address != d.endpoints.NetworkAddress() {
-				address = node.Address
-			}
-			return nil
-		})
+		address, err := cluster.ResolveTarget(d.cluster, targetNode)
 		if err != nil {
 			return SmartError(err)
 		}
diff --git a/lxd/storage_pools.go b/lxd/storage_pools.go
index bab4ef6aa..c1f1d2525 100644
--- a/lxd/storage_pools.go
+++ b/lxd/storage_pools.go
@@ -251,6 +251,38 @@ func storagePoolGet(d *Daemon, r *http.Request) Response {
 	}
 	pool.UsedBy = poolUsedBy
 
+	targetNode := r.FormValue("targetNode")
+
+	// If no target node is specified and this LXD instance is clustered,
+	// we omit the node-specific fields, namely "source".
+	clustered, err := cluster.Enabled(d.db)
+	if err != nil {
+		return SmartError(err)
+	}
+	if targetNode == "" && clustered {
+		delete(pool.Config, "source")
+	}
+
+	// If a target was specified, forward the request to the relevant node.
+	if targetNode != "" {
+		address, err := cluster.ResolveTarget(d.cluster, targetNode)
+		if err != nil {
+			return SmartError(err)
+		}
+		if address != "" {
+			cert := d.endpoints.NetworkCert()
+			client, err := cluster.Connect(address, cert, true)
+			if err != nil {
+				return SmartError(err)
+			}
+			client = client.ClusterTargetNode(targetNode)
+			pool, _, err = client.GetStoragePool(poolName)
+			if err != nil {
+				return SmartError(err)
+			}
+		}
+	}
+
 	etag := []interface{}{pool.Name, pool.Driver, pool.Config}
 
 	return SyncResponseETag(true, &pool, etag)
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 08158711c..e071cbdc7 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -210,6 +210,13 @@ test_clustering_storage() {
   LXD_DIR="${LXD_TWO_DIR}" lxc storage create pool1 dir
   LXD_DIR="${LXD_ONE_DIR}" lxc storage show pool1 | grep state: | grep -q CREATED
 
+  # The 'source' config key is omitted when showing the cluster
+  # configuration, and included when showing the node-specific one.
+  ! LXD_DIR="${LXD_TWO_DIR}" lxc storage show pool1 | grep -q source
+  LXD_DIR="${LXD_ONE_DIR}" lxc storage show pool1 --target node1 | grep source | grep -q "$(basename "${LXD_ONE_DIR}")"
+  LXD_DIR="${LXD_ONE_DIR}" lxc storage show pool1 --target node2 | grep source | grep -q "$(basename "${LXD_TWO_DIR}")"
+
+
   LXD_DIR="${LXD_TWO_DIR}" lxd shutdown
   LXD_DIR="${LXD_ONE_DIR}" lxd shutdown
   sleep 2

From a7d32222b3d05d6e90ab4ab35d0073f30abb9c45 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 11 Dec 2017 12:16:31 +0000
Subject: [PATCH 117/227] Support deleting a pool across all nodes of a cluster

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
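
In isolation, the fan-out added here follows the existing notifier
pattern (sketch; the surrounding error handling is omitted):

    // Run the deletion against every other node, after local cleanup.
    notifier, err := cluster.NewNotifier(d.State(), d.endpoints.NetworkCert(), cluster.NotifyAll)
    if err != nil {
    	return SmartError(err)
    }
    err = notifier(func(client lxd.ContainerServer) error {
    	return client.DeleteStoragePool(poolName)
    })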
---
 lxd/storage_pools.go      | 28 ++++++++++++++++++++++++++++
 test/suites/clustering.sh |  3 +++
 2 files changed, 31 insertions(+)

diff --git a/lxd/storage_pools.go b/lxd/storage_pools.go
index c1f1d2525..2d6479ab6 100644
--- a/lxd/storage_pools.go
+++ b/lxd/storage_pools.go
@@ -417,6 +417,34 @@ func storagePoolDelete(d *Daemon, r *http.Request) Response {
 		return InternalError(err)
 	}
 
+	// If this is a cluster notification, we're done; any database work
+	// will be done by the node originally serving the request.
+	if isClusterNotification(r) {
+		return EmptySyncResponse
+	}
+
+	// If we are clustered, also notify all other nodes, if any.
+	clustered, err := cluster.Enabled(d.db)
+	if err != nil {
+		return SmartError(err)
+	}
+	if clustered {
+		notifier, err := cluster.NewNotifier(d.State(), d.endpoints.NetworkCert(), cluster.NotifyAll)
+		if err != nil {
+			return SmartError(err)
+		}
+		err = notifier(func(client lxd.ContainerServer) error {
+			_, _, err := client.GetServer()
+			if err != nil {
+				return err
+			}
+			return client.DeleteStoragePool(poolName)
+		})
+		if err != nil {
+			return SmartError(err)
+		}
+	}
+
 	err = dbStoragePoolDeleteAndUpdateCache(d.cluster, poolName)
 	if err != nil {
 		return SmartError(err)
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index e071cbdc7..7cb6a3db7 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -216,6 +216,9 @@ test_clustering_storage() {
   LXD_DIR="${LXD_ONE_DIR}" lxc storage show pool1 --target node1 | grep source | grep -q "$(basename "${LXD_ONE_DIR}")"
   LXD_DIR="${LXD_ONE_DIR}" lxc storage show pool1 --target node2 | grep source | grep -q "$(basename "${LXD_TWO_DIR}")"
 
+  # Delete the storage pool
+  LXD_DIR="${LXD_ONE_DIR}" lxc storage delete pool1
+  ! LXD_DIR="${LXD_ONE_DIR}" lxc storage list | grep -q pool1
 
   LXD_DIR="${LXD_TWO_DIR}" lxd shutdown
   LXD_DIR="${LXD_ONE_DIR}" lxd shutdown

From 181d7863c2dd71bb761309b9923b99ac6d64e925 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 11 Dec 2017 14:55:03 +0000
Subject: [PATCH 118/227] Delete containers from any node

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
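
The guard is meant to be reusable by other container endpoints;
schematically (someContainerHandler is a placeholder name):

    // Sketch: forward the request if the container lives on another node,
    // otherwise fall through to the local implementation.
    func someContainerHandler(d *Daemon, r *http.Request) Response {
    	name := mux.Vars(r)["name"]

    	response, err := ForwardedResponseIfContainerIsRemote(d, r, name)
    	if err != nil {
    		return SmartError(err)
    	}
    	if response != nil {
    		return response
    	}

    	// ... local handling continues here ...
    	return EmptySyncResponse
    }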
---
 lxd/container_delete.go   | 10 ++++++++++
 test/suites/clustering.sh |  5 +++++
 2 files changed, 15 insertions(+)

diff --git a/lxd/container_delete.go b/lxd/container_delete.go
index c0226f349..6741c2fba 100644
--- a/lxd/container_delete.go
+++ b/lxd/container_delete.go
@@ -9,6 +9,16 @@ import (
 
 func containerDelete(d *Daemon, r *http.Request) Response {
 	name := mux.Vars(r)["name"]
+
+	// Handle requests targeted to a container on a different node
+	response, err := ForwardedResponseIfContainerIsRemote(d, r, name)
+	if err != nil {
+		return SmartError(err)
+	}
+	if response != nil {
+		return response
+	}
+
 	c, err := containerLoadByName(d.State(), name)
 	if err != nil {
 		return SmartError(err)
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 7cb6a3db7..aba729a4b 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -143,6 +143,11 @@ test_clustering_containers() {
   # A Node: field indicates on which node the container is running
   LXD_DIR="${LXD_ONE_DIR}" lxc info foo | grep -q "Node: node2"
 
+  # Delete the container via node1 and create it again.
+  LXD_DIR="${LXD_ONE_DIR}" lxc delete foo
+  ! LXD_DIR="${LXD_ONE_DIR}" lxc list | grep -q foo
+  LXD_DIR="${LXD_ONE_DIR}" lxc init --target node2 testimage foo
+
   # Start and stop the container via node1
   LXD_DIR="${LXD_ONE_DIR}" lxc start foo
   LXD_DIR="${LXD_TWO_DIR}" lxc info foo | grep -q "Status: Running"

From 9746552d1c8bf4b189be7fd89753c66a02d43f80 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 12 Dec 2017 08:55:59 +0000
Subject: [PATCH 119/227] Add StoragePool.Nodes field to the API and to lxc
 storage show

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
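
With this field, 'lxc storage show' of a pending pool would render
roughly as follows (hypothetical output):

    config: {}
    description: ""
    name: pool1
    driver: dir
    used_by: []
    state: PENDING
    nodes:
    - node1
    - node2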
---
 lxd/db/storage_pools.go    | 25 +++++++++++++++++++++++++
 shared/api/storage_pool.go |  3 ++-
 test/suites/clustering.sh  |  2 ++
 3 files changed, 29 insertions(+), 1 deletion(-)

diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index 943b08f90..7391c7418 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -363,9 +363,34 @@ func (c *Cluster) StoragePoolGet(poolName string) (int64, *api.StoragePool, erro
 		storagePool.State = "UNKNOWN"
 	}
 
+	nodes, err := c.storagePoolNodes(poolID)
+	if err != nil {
+		return -1, nil, err
+	}
+	storagePool.Nodes = nodes
+
 	return poolID, &storagePool, nil
 }
 
+// Return the names of the nodes the given pool is defined on.
+func (c *Cluster) storagePoolNodes(poolID int64) ([]string, error) {
+	stmt := `
+SELECT nodes.name FROM nodes
+  JOIN storage_pools_nodes ON storage_pools_nodes.node_id = nodes.id
+  WHERE storage_pools_nodes.storage_pool_id = ?
+`
+	var nodes []string
+	err := c.Transaction(func(tx *ClusterTx) error {
+		var err error
+		nodes, err = query.SelectStrings(tx.tx, stmt, poolID)
+		return err
+	})
+	if err != nil {
+		return nil, err
+	}
+	return nodes, nil
+}
+
 // Get config of a storage pool.
 func (c *Cluster) StoragePoolConfigGet(poolID int64) (map[string]string, error) {
 	var key, value string
diff --git a/shared/api/storage_pool.go b/shared/api/storage_pool.go
index ac5a2b3cb..614736b94 100644
--- a/shared/api/storage_pool.go
+++ b/shared/api/storage_pool.go
@@ -21,7 +21,8 @@ type StoragePool struct {
 	UsedBy []string `json:"used_by" yaml:"used_by"`
 
 	// API extension: clustering
-	State string `json:"state" yaml:"state"`
+	State string   `json:"state" yaml:"state"`
+	Nodes []string `json:"nodes" yaml:"nodes"`
 }
 
 // StoragePoolPut represents the modifiable fields of a LXD storage pool.
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index aba729a4b..1102a9c61 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -205,6 +205,8 @@ test_clustering_storage() {
 
   # Define storage pools on the two nodes
   LXD_DIR="${LXD_ONE_DIR}" lxc storage create pool1 dir --target node1
+  LXD_DIR="${LXD_TWO_DIR}" lxc storage show pool1 | grep -q node1
+  ! LXD_DIR="${LXD_TWO_DIR}" lxc storage show pool1 | grep -q node2
   LXD_DIR="${LXD_ONE_DIR}" lxc storage create pool1 dir --target node2
   LXD_DIR="${LXD_ONE_DIR}" lxc storage show pool1 | grep state: | grep -q PENDING
 

From 5517067c17345c28aab41ba79f967f1af646f1b2 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 12 Dec 2017 11:27:37 +0000
Subject: [PATCH 120/227] Add db.ImageLocate to find which node has a local
 copy of an image

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
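
The expected call pattern, sketched (here c is a *db.Cluster and
fingerprint an image fingerprint):

    // An empty address means the image is already available locally;
    // otherwise it must be fetched from the returned node first.
    address, err := c.ImageLocate(fingerprint)
    if err != nil {
    	return err // not available on any online node
    }
    if address != "" {
    	// Transfer the image from `address` before using it.
    }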
---
 lxd/db/images.go      | 54 +++++++++++++++++++++++++++++++++++++++++++++++++++
 lxd/db/images_test.go | 38 ++++++++++++++++++++++++++++++++++++
 lxd/db/node.go        | 23 +++++++++++++++++++++-
 3 files changed, 114 insertions(+), 1 deletion(-)
 create mode 100644 lxd/db/images_test.go

diff --git a/lxd/db/images.go b/lxd/db/images.go
index d183d253d..91e8c1cc5 100644
--- a/lxd/db/images.go
+++ b/lxd/db/images.go
@@ -7,6 +7,7 @@ import (
 
 	_ "github.com/mattn/go-sqlite3"
 
+	"github.com/lxc/lxd/lxd/db/query"
 	"github.com/lxc/lxd/shared/api"
 	"github.com/lxc/lxd/shared/osarch"
 )
@@ -303,6 +304,59 @@ func (c *Cluster) ImageGet(fingerprint string, public bool, strictMatching bool)
 	return id, &image, nil
 }
 
+// ImageLocate returns the address of an online node that has a local copy of
+// the given image, or an empty string if the image is already available on this
+// node.
+//
+// If the image is not available on any online node, an error is returned.
+func (c *Cluster) ImageLocate(fingerprint string) (string, error) {
+	stmt := `
+SELECT nodes.address FROM nodes
+  LEFT JOIN images_nodes ON images_nodes.node_id = nodes.id
+  LEFT JOIN images ON images_nodes.image_id = images.id
+WHERE images.fingerprint = ?
+`
+	var localAddress string // Address of this node
+	var addresses []string  // Addresses of online nodes with the image
+
+	err := c.Transaction(func(tx *ClusterTx) error {
+		var err error
+		localAddress, err = tx.NodeAddress()
+		if err != nil {
+			return err
+		}
+		allAddresses, err := query.SelectStrings(tx.tx, stmt, fingerprint)
+		if err != nil {
+			return err
+		}
+		for _, address := range allAddresses {
+			node, err := tx.NodeByAddress(address)
+			if err != nil {
+				return err
+			}
+			if node.IsDown() {
+				continue
+			}
+			addresses = append(addresses, address)
+		}
+		return err
+	})
+	if err != nil {
+		return "", err
+	}
+	if len(addresses) == 0 {
+		return "", fmt.Errorf("image not available on any online node")
+	}
+
+	for _, address := range addresses {
+		if address == localAddress {
+			return "", nil
+		}
+	}
+
+	return addresses[0], nil
+}
+
 func (c *Cluster) ImageDelete(id int) error {
 	_, err := exec(c.db, "DELETE FROM images WHERE id=?", id)
 	if err != nil {
diff --git a/lxd/db/images_test.go b/lxd/db/images_test.go
new file mode 100644
index 000000000..c1f9b145d
--- /dev/null
+++ b/lxd/db/images_test.go
@@ -0,0 +1,38 @@
+package db_test
+
+import (
+	"testing"
+	"time"
+
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestImageLocate(t *testing.T) {
+	cluster, cleanup := db.NewTestCluster(t)
+	defer cleanup()
+
+	err := cluster.ImageInsert(
+		"abc", "x.gz", 16, false, false, "amd64", time.Now(), time.Now(), map[string]string{})
+	require.NoError(t, err)
+
+	address, err := cluster.ImageLocate("abc")
+	require.NoError(t, err)
+	assert.Equal(t, "", address)
+
+	// Pretend that the function is being run on another node.
+	cluster.NodeID(2)
+	address, err = cluster.ImageLocate("abc")
+	require.NoError(t, err)
+	assert.Equal(t, "0.0.0.0", address)
+
+	// Pretend that the target node is down
+	err = cluster.Transaction(func(tx *db.ClusterTx) error {
+		return tx.NodeHeartbeat("0.0.0.0", time.Now().Add(-time.Minute))
+	})
+	require.NoError(t, err)
+
+	address, err = cluster.ImageLocate("abc")
+	require.EqualError(t, err, "image not available on any online node")
+}
diff --git a/lxd/db/node.go b/lxd/db/node.go
index 0fac94c64..e4d393360 100644
--- a/lxd/db/node.go
+++ b/lxd/db/node.go
@@ -78,6 +78,23 @@ func (c *ClusterTx) NodeName() (string, error) {
 	}
 }
 
+// NodeAddress returns the address of the node this method is invoked on.
+func (c *ClusterTx) NodeAddress() (string, error) {
+	stmt := "SELECT address FROM nodes WHERE id=?"
+	addresses, err := query.SelectStrings(c.tx, stmt, c.nodeID)
+	if err != nil {
+		return "", err
+	}
+	switch len(addresses) {
+	case 0:
+		return "", nil
+	case 1:
+		return addresses[0], nil
+	default:
+		return "", fmt.Errorf("inconsistency: non-unique node ID")
+	}
+}
+
 // Nodes returns all LXD nodes part of the cluster.
 //
 // If this LXD instance is not clustered, a list with a single node whose
@@ -247,5 +264,9 @@ func (c *ClusterTx) NodeClear(id int64) error {
 }
 
 func nodeIsDown(heartbeat time.Time) bool {
-	return heartbeat.Before(time.Now().Add(-20 * time.Second))
+	return heartbeat.Before(time.Now().Add(-time.Duration(nodeDownThreshold) * time.Second))
 }
+
+// Number of seconds without a heartbeat after which a node is
+// considered offline.
+var nodeDownThreshold = 20

From fa5d710818e85724a640530717f3127d38dd72bb Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 13 Dec 2017 10:11:52 +0000
Subject: [PATCH 121/227] Transfer images between nodes when creating a
 container

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
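Note: the flow added below hinges on the three-valued contract of the
new Cluster.ImageLocate helper. A minimal usage sketch (illustrative
only, made-up fingerprint; not part of the patch):

    addr, err := d.cluster.ImageLocate("abcdef123456")
    switch {
    case err != nil:
        // No online node has the image: give up.
    case addr == "":
        // The image is already available locally: proceed.
    default:
        // An online peer has it: fetch it from addr with
        // imageImportFromNode(), then record the local copy with
        // ImageAssociateNode() (both introduced below).
    }
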
 lxd/container.go          | 29 +++++++++++++++++++++++-
 lxd/containers_post.go    |  4 ++--
 lxd/db/images.go          | 17 +++++++++++++-
 lxd/images.go             | 57 +++++++++++++++++++++++++++++++++++++++++++++++
 test/suites/clustering.sh | 15 ++++++++-----
 5 files changed, 113 insertions(+), 9 deletions(-)

diff --git a/lxd/container.go b/lxd/container.go
index c561c2d16..6bd1eea87 100644
--- a/lxd/container.go
+++ b/lxd/container.go
@@ -11,6 +11,7 @@ import (
 
 	"gopkg.in/lxc/go-lxc.v2"
 
+	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/state"
 	"github.com/lxc/lxd/lxd/sys"
@@ -18,6 +19,7 @@ import (
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
 	"github.com/lxc/lxd/shared/idmap"
+	"github.com/lxc/lxd/shared/logger"
 	"github.com/lxc/lxd/shared/osarch"
 )
 
@@ -603,13 +605,38 @@ func containerCreateEmptySnapshot(s *state.State, args db.ContainerArgs) (contai
 	return c, nil
 }
 
-func containerCreateFromImage(s *state.State, args db.ContainerArgs, hash string) (container, error) {
+func containerCreateFromImage(d *Daemon, args db.ContainerArgs, hash string) (container, error) {
+	s := d.State()
+
 	// Get the image properties
 	_, img, err := s.Cluster.ImageGet(hash, false, false)
 	if err != nil {
 		return nil, err
 	}
 
+	// Check if the image is available locally or if it's on another node.
+	nodeAddress, err := s.Cluster.ImageLocate(hash)
+	if err != nil {
+		return nil, err
+	}
+	if nodeAddress != "" {
+		// The image is available from another node, let's try to
+		// import it.
+		logger.Debugf("Transferring image %s from node %s", hash, nodeAddress)
+		client, err := cluster.Connect(nodeAddress, d.endpoints.NetworkCert(), false)
+		if err != nil {
+			return nil, err
+		}
+		err = imageImportFromNode(filepath.Join(d.os.VarDir, "images"), client, hash)
+		if err != nil {
+			return nil, err
+		}
+		err = d.cluster.ImageAssociateNode(hash)
+		if err != nil {
+			return nil, err
+		}
+	}
+
 	// Set the "image.*" keys
 	if img.Properties != nil {
 		for k, v := range img.Properties {
diff --git a/lxd/containers_post.go b/lxd/containers_post.go
index 6657cd9a2..658c7579e 100644
--- a/lxd/containers_post.go
+++ b/lxd/containers_post.go
@@ -119,7 +119,7 @@ func createFromImage(d *Daemon, req *api.ContainersPost) Response {
 			return err
 		}
 
-		_, err = containerCreateFromImage(d.State(), args, info.Fingerprint)
+		_, err = containerCreateFromImage(d, args, info.Fingerprint)
 		return err
 	}
 
@@ -322,7 +322,7 @@ func createFromMigration(d *Daemon, req *api.ContainersPost) Response {
 		}
 
 		if ps.MigrationType() == MigrationFSType_RSYNC {
-			c, err = containerCreateFromImage(d.State(), args, req.Source.BaseImage)
+			c, err = containerCreateFromImage(d, args, req.Source.BaseImage)
 			if err != nil {
 				return InternalError(err)
 			}
diff --git a/lxd/db/images.go b/lxd/db/images.go
index 91e8c1cc5..77b7eb1bb 100644
--- a/lxd/db/images.go
+++ b/lxd/db/images.go
@@ -334,7 +334,7 @@ WHERE images.fingerprint = ?
 			if err != nil {
 				return err
 			}
-			if node.IsDown() {
+			if address != localAddress && node.IsDown() {
 				continue
 			}
 			addresses = append(addresses, address)
@@ -357,6 +357,21 @@ WHERE images.fingerprint = ?
 	return addresses[0], nil
 }
 
+// ImageAssociateNode creates a new entry in the images_nodes table for
+// tracking that the current node has the given image.
+func (c *Cluster) ImageAssociateNode(fingerprint string) error {
+	imageID, _, err := c.ImageGet(fingerprint, false, true)
+	if err != nil {
+		return err
+	}
+
+	err = c.Transaction(func(tx *ClusterTx) error {
+		_, err := tx.tx.Exec("INSERT INTO images_nodes(image_id, node_id) VALUES(?, ?)", imageID, c.nodeID)
+		return err
+	})
+	return err
+}
+
 func (c *Cluster) ImageDelete(id int) error {
 	_, err := exec(c.db, "DELETE FROM images WHERE id=?", id)
 	if err != nil {
diff --git a/lxd/images.go b/lxd/images.go
index 420ec69c9..c6e361e3a 100644
--- a/lxd/images.go
+++ b/lxd/images.go
@@ -24,6 +24,7 @@ import (
 	"golang.org/x/net/context"
 	"gopkg.in/yaml.v2"
 
+	lxd "github.com/lxc/lxd/client"
 	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/state"
@@ -1575,6 +1576,62 @@ func imageSecret(d *Daemon, r *http.Request) Response {
 	return OperationResponse(op)
 }
 
+func imageImportFromNode(imagesDir string, client lxd.ContainerServer, fingerprint string) error {
+	// Prepare the temp files
+	buildDir, err := ioutil.TempDir(imagesDir, "lxd_build_")
+	if err != nil {
+		return errors.Wrap(err, "failed to create temporary directory for download")
+	}
+	defer os.RemoveAll(buildDir)
+
+	metaFile, err := ioutil.TempFile(buildDir, "lxd_tar_")
+	if err != nil {
+		return err
+	}
+	defer metaFile.Close()
+
+	rootfsFile, err := ioutil.TempFile(buildDir, "lxd_tar_")
+	if err != nil {
+		return err
+	}
+	defer rootfsFile.Close()
+
+	getReq := lxd.ImageFileRequest{
+		MetaFile:   io.WriteSeeker(metaFile),
+		RootfsFile: io.WriteSeeker(rootfsFile),
+	}
+	getResp, err := client.GetImageFile(fingerprint, getReq)
+	if err != nil {
+		return err
+	}
+	metaFile.Close()
+	rootfsFile.Close()
+
+	if getResp.RootfsSize == 0 {
+		// This is a unified image.
+		rootfsPath := filepath.Join(imagesDir, fingerprint)
+		err := shared.FileMove(metaFile.Name(), rootfsPath)
+		if err != nil {
+			return err
+		}
+	} else {
+		// This is a split image.
+		metaPath := filepath.Join(imagesDir, fingerprint)
+		rootfsPath := filepath.Join(imagesDir, fingerprint+".rootfs")
+
+		err := shared.FileMove(metaFile.Name(), metaPath)
+		if err != nil {
+			return err
+		}
+		err = shared.FileMove(rootfsFile.Name(), rootfsPath)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
 func imageRefresh(d *Daemon, r *http.Request) Response {
 	fingerprint := mux.Vars(r)["fingerprint"]
 	imageId, imageInfo, err := d.cluster.ImageGet(fingerprint, false, false)
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 1102a9c61..e3677fda0 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -143,17 +143,22 @@ test_clustering_containers() {
   # A Node: field indicates on which node the container is running
   LXD_DIR="${LXD_ONE_DIR}" lxc info foo | grep -q "Node: node2"
 
-  # Delete the container via node1 and create it again.
-  LXD_DIR="${LXD_ONE_DIR}" lxc delete foo
-  ! LXD_DIR="${LXD_ONE_DIR}" lxc list | grep -q foo
-  LXD_DIR="${LXD_ONE_DIR}" lxc init --target node2 testimage foo
-
   # Start and stop the container via node1
   LXD_DIR="${LXD_ONE_DIR}" lxc start foo
   LXD_DIR="${LXD_TWO_DIR}" lxc info foo | grep -q "Status: Running"
   LXD_DIR="${LXD_ONE_DIR}" lxc list | grep foo | grep -q RUNNING
   LXD_DIR="${LXD_ONE_DIR}" lxc stop foo
 
+  # Create a container on node1 using the image that was stored on
+  # node2.
+  LXD_DIR="${LXD_TWO_DIR}" lxc init --target node1 testimage bar
+  LXD_DIR="${LXD_ONE_DIR}" lxc start bar
+  LXD_DIR="${LXD_TWO_DIR}" lxc stop bar
+  LXD_DIR="${LXD_ONE_DIR}" lxc delete bar
+  ! LXD_DIR="${LXD_TWO_DIR}" lxc list | grep -q bar
+
+  # Delete the network now, since we're going to shutdown node2 and it
+  # won't be possible afterwards.
   LXD_DIR="${LXD_TWO_DIR}" lxc network delete "${bridge}"
 
   # Shutdown node 2, wait for it to be considered offline, and list

From c328ba766c732b0fbb7baa33d9236fa363a01135 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 13 Dec 2017 14:36:59 +0000
Subject: [PATCH 122/227] Add Network.State and Network.Nodes API fields

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
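Note: with these fields, a GET /1.0/networks/<name> response on a
clustered server decodes into something like the following (sketch
with illustrative values):

    network := api.Network{
        Name:    "lxdbr0",
        Type:    "bridge",
        Managed: true,
        // New fields, under the "clustering" API extension:
        State: "CREATED",                  // "PENDING" until created on all nodes
        Nodes: []string{"node1", "node2"}, // nodes the network is defined on
    }
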
 lxd/db/cluster/schema.go |  2 +-
 lxd/db/cluster/update.go |  7 +++++++
 lxd/db/migration.go      | 50 ++++++++++++++++++++++++++++++------------------
 lxd/db/migration_test.go |  4 ++++
 lxd/db/networks.go       | 46 ++++++++++++++++++++++++++++++++++++++++++--
 lxd/db/storage_pools.go  |  1 +
 shared/api/network.go    |  4 ++++
 7 files changed, 92 insertions(+), 22 deletions(-)

diff --git a/lxd/db/cluster/schema.go b/lxd/db/cluster/schema.go
index 088ba8183..2609dce8c 100644
--- a/lxd/db/cluster/schema.go
+++ b/lxd/db/cluster/schema.go
@@ -234,5 +234,5 @@ CREATE TABLE storage_volumes_config (
     FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
 );
 
-INSERT INTO schema (version, updated_at) VALUES (4, strftime("%s"))
+INSERT INTO schema (version, updated_at) VALUES (5, strftime("%s"))
 `
diff --git a/lxd/db/cluster/update.go b/lxd/db/cluster/update.go
index 40b0b60c9..943f457b4 100644
--- a/lxd/db/cluster/update.go
+++ b/lxd/db/cluster/update.go
@@ -27,6 +27,13 @@ var updates = map[int]schema.Update{
 	2: updateFromV1,
 	3: updateFromV2,
 	4: updateFromV3,
+	5: updateFromV4,
+}
+
+func updateFromV4(tx *sql.Tx) error {
+	stmt := "UPDATE networks SET state = 1"
+	_, err := tx.Exec(stmt)
+	return err
 }
 
 func updateFromV3(tx *sql.Tx) error {
diff --git a/lxd/db/migration.go b/lxd/db/migration.go
index 4be14d2b0..e75df82b6 100644
--- a/lxd/db/migration.go
+++ b/lxd/db/migration.go
@@ -6,6 +6,7 @@ import (
 	"strings"
 
 	"github.com/lxc/lxd/lxd/db/query"
+	"github.com/lxc/lxd/shared"
 	"github.com/pkg/errors"
 )
 
@@ -122,6 +123,11 @@ func (c *Cluster) ImportPreClusteringData(dump *Dump) error {
 				appendNodeID()
 			case "storage_volumes_config":
 				appendNodeID()
+			case "networks":
+				fallthrough
+			case "storage_pools":
+				columns = append(columns, "state")
+				row = append(row, storagePoolCreated)
 			}
 			stmt := fmt.Sprintf("INSERT INTO %s(%s)", table, strings.Join(columns, ", "))
 			stmt += fmt.Sprintf(" VALUES %s", query.Params(len(columns)))
@@ -138,25 +144,12 @@
 			}
 
 			// Also insert the image ID -> node ID association.
-			if table == "images" {
-				stmt := "INSERT INTO images_nodes(image_id, node_id) VALUES(?, 1)"
-				var imageID int64
-				for i, column := range columns {
-					if column == "id" {
-						imageID = row[i].(int64)
-						if err != nil {
-							return err
-						}
-						break
-					}
-				}
-				if imageID == 0 {
-					return fmt.Errorf("image has invalid ID")
-				}
-				_, err := tx.Exec(stmt, row...)
-				if err != nil {
-					return errors.Wrapf(err, "failed to associate image to node")
-				}
+			if shared.StringInSlice(table, []string{"images", "networks", "storage_pools"}) {
+				entity := table[:len(table)-1]
+				err := importNodeAssociation(entity, columns, row, tx)
+				if err != nil {
+					return err
+				}
 			}
 		}
 	}
@@ -164,6 +157,28 @@
 	return tx.Commit()
 }
 
+// Insert a row in one of the nodes association tables (storage_pools_nodes,
+// networks_nodes, images_nodes).
+func importNodeAssociation(entity string, columns []string, row []interface{}, tx *sql.Tx) error {
+	stmt := fmt.Sprintf(
+		"INSERT INTO %ss_nodes(%s_id, node_id) VALUES(?, 1)", entity, entity)
+	var id int64
+	for i, column := range columns {
+		if column == "id" {
+			id = row[i].(int64)
+			break
+		}
+	}
+	if id == 0 {
+		return fmt.Errorf("entity %s has invalid ID", entity)
+	}
+	_, err := tx.Exec(stmt, row...)
+	if err != nil {
+		return errors.Wrapf(err, "failed to associate %s to node", entity)
+	}
+	return nil
+}
+
 // Dump is a dump of all the user data in lxd.db prior the migration to the
 // cluster db.
 type Dump struct {
diff --git a/lxd/db/migration_test.go b/lxd/db/migration_test.go
index 720a9dfb1..28281e08f 100644
--- a/lxd/db/migration_test.go
+++ b/lxd/db/migration_test.go
@@ -69,6 +69,8 @@ func TestImportPreClusteringData(t *testing.T) {
 	require.NoError(t, err)
 	assert.Equal(t, int64(1), id)
 	assert.Equal(t, "true", network.Config["ipv4.nat"])
+	assert.Equal(t, "CREATED", network.State)
+	assert.Equal(t, []string{"none"}, network.Nodes)
 
 	// storage
 	pools, err := cluster.StoragePools()
@@ -79,6 +81,8 @@ func TestImportPreClusteringData(t *testing.T) {
 	assert.Equal(t, int64(1), id)
 	assert.Equal(t, "/foo/bar", pool.Config["source"])
 	assert.Equal(t, "123", pool.Config["size"])
+	assert.Equal(t, "CREATED", pool.State)
+	assert.Equal(t, []string{"none"}, pool.Nodes)
 	volumes, err := cluster.StoragePoolVolumesGet(id, []int{1})
 	require.NoError(t, err)
 	assert.Len(t, volumes, 1)
diff --git a/lxd/db/networks.go b/lxd/db/networks.go
index 9115e7e46..3ce009c54 100644
--- a/lxd/db/networks.go
+++ b/lxd/db/networks.go
@@ -90,13 +90,21 @@ func (c *Cluster) Networks() ([]string, error) {
 	return response, nil
 }
 
+// Network state.
+const (
+	networkPending int = iota // Network defined but not yet created.
+	networkCreated            // Network created on all nodes.
+	networkErrored            // Network creation failed on some nodes.
+)
+
 func (c *Cluster) NetworkGet(name string) (int64, *api.Network, error) {
 	description := sql.NullString{}
 	id := int64(-1)
+	state := 0
 
-	q := "SELECT id, description FROM networks WHERE name=?"
+	q := "SELECT id, description, state FROM networks WHERE name=?"
 	arg1 := []interface{}{name}
-	arg2 := []interface{}{&id, &description}
+	arg2 := []interface{}{&id, &description, &state}
 	err := dbQueryRowScan(c.db, q, arg1, arg2)
 	if err != nil {
 		return -1, nil, err
@@ -115,9 +123,43 @@ func (c *Cluster) NetworkGet(name string) (int64, *api.Network, error) {
 	network.Description = description.String
 	network.Config = config
 
+	switch state {
+	case networkPending:
+		network.State = "PENDING"
+	case networkCreated:
+		network.State = "CREATED"
+	default:
+		network.State = "UNKNOWN"
+	}
+
+	nodes, err := c.networkNodes(id)
+	if err != nil {
+		return -1, nil, err
+	}
+	network.Nodes = nodes
+
 	return id, &network, nil
 }
 
+// Return the names of the nodes the given network is defined on.
+func (c *Cluster) networkNodes(networkID int64) ([]string, error) {
+	stmt := `
+SELECT nodes.name FROM nodes
+  JOIN networks_nodes ON networks_nodes.node_id = nodes.id
+  WHERE networks_nodes.network_id = ?
+`
+	var nodes []string
+	err := c.Transaction(func(tx *ClusterTx) error {
+		var err error
+		nodes, err = query.SelectStrings(tx.tx, stmt, networkID)
+		return err
+	})
+	if err != nil {
+		return nil, err
+	}
+	return nodes, nil
+}
+
 func (c *Cluster) NetworkGetInterface(devName string) (int64, *api.Network, error) {
 	id := int64(-1)
 	name := ""
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index 7391c7418..fe3e259cc 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -439,6 +439,7 @@ func (c *Cluster) StoragePoolCreate(poolName string, poolDescription string, poo
 	values := []interface{}{id, c.nodeID}
 	_, err = query.UpsertObject(tx, "storage_pools_nodes", columns, values)
 	if err != nil {
+		tx.Rollback()
 		return -1, err
 	}
 
diff --git a/shared/api/network.go b/shared/api/network.go
index 4fe3e8705..196c7e490 100644
--- a/shared/api/network.go
+++ b/shared/api/network.go
@@ -38,6 +38,10 @@ type Network struct {
 
 	// API extension: network
 	Managed bool `json:"managed" yaml:"managed"`
+
+	// API extension: clustering
+	State string   `json:"state" yaml:"state"`
+	Nodes []string `json:"nodes" yaml:"nodes"`
 }
 
 // Writable converts a full Network struct into a NetworkPut struct (filters read-only fields)

From be987f2fccb4a76169ad69889e2f8fa8976428c1 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 13 Dec 2017 15:06:35 +0000
Subject: [PATCH 123/227] Add debug logging when loading data from a
 pre-clustering db

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/db.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/lxd/db/db.go b/lxd/db/db.go
index 6bcde122e..9abc84a7c 100644
--- a/lxd/db/db.go
+++ b/lxd/db/db.go
@@ -65,6 +65,7 @@ func OpenNode(dir string, fresh func(*Node) error, legacyPatches map[int]*Legacy
 	legacyHook := legacyPatchHook(db, legacyPatches)
 	hook := func(version int, tx *sql.Tx) error {
 		if version == node.UpdateFromPreClustering {
+			logger.Debug("Loading pre-clustering sqlite data")
 			var err error
 			dump, err = LoadPreClusteringData(tx)
 			if err != nil {

From b574697e34dffc4d162e3f289a357e3dcbacbcd3 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 13 Dec 2017 15:46:03 +0000
Subject: [PATCH 124/227] Migrate the profiles table before containers_profiles

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
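Note: tables are imported in slice order, and containers_profiles has
foreign keys into both containers and profiles, so profiles must be
populated before the containers tables are. The invariant, spelled out
(not part of the patch):

    // In preClusteringTables, any table referenced by a foreign key
    // must appear before the tables referencing it, e.g. "profiles"
    // before "containers" and "containers_profiles".
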
 lxd/db/migration.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lxd/db/migration.go b/lxd/db/migration.go
index e75df82b6..fe02ae1dd 100644
--- a/lxd/db/migration.go
+++ b/lxd/db/migration.go
@@ -193,6 +193,7 @@
 var preClusteringTables = []string{
 	"certificates",
 	"config",
+	"profiles",
 	"containers",
 	"containers_config",
 	"containers_devices",
@@ -201,7 +202,6 @@ var preClusteringTables = []string{
 	"images_source",
 	"networks",
 	"networks_config",
-	"profiles",
 	"storage_pools",
 	"storage_pools_config",
 	"storage_volumes",

From f686978b4e65d310333e44b58cd496c2ed492801 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 14 Dec 2017 08:41:11 +0000
Subject: [PATCH 125/227] Add db APIs for creating networks in the PENDING
 state

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
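Note: usage sketch for the new transaction-level API (assuming nodes
named node1/node2 are already registered; not part of the patch):

    err := cluster.Transaction(func(tx *db.ClusterTx) error {
        // Define the same network as pending on both nodes, each with
        // its own node-specific config.
        err := tx.NetworkCreatePending("node1", "net1", map[string]string{
            "bridge.external_interfaces": "eth1",
        })
        if err != nil {
            return err
        }
        return tx.NetworkCreatePending("node2", "net1", map[string]string{
            "bridge.external_interfaces": "eth2",
        })
    })
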
 lxd/db/networks.go      | 142 +++++++++++++++++++++++++++++++++++++++++++++++-
 lxd/db/networks_test.go |  72 ++++++++++++++++++++++++
 2 files changed, 213 insertions(+), 1 deletion(-)
 create mode 100644 lxd/db/networks_test.go

diff --git a/lxd/db/networks.go b/lxd/db/networks.go
index 3ce009c54..bb232980f 100644
--- a/lxd/db/networks.go
+++ b/lxd/db/networks.go
@@ -8,6 +8,7 @@ import (
 	_ "github.com/mattn/go-sqlite3"
 
 	"github.com/lxc/lxd/lxd/db/query"
+	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
 )
 
@@ -55,6 +56,23 @@ func (c *ClusterTx) NetworkIDs() (map[string]int64, error) {
 	return ids, nil
 }
 
+// NetworkID returns the ID of the network with the given name.
+func (c *ClusterTx) NetworkID(name string) (int64, error) {
+	stmt := "SELECT id FROM networks WHERE name=?"
+	ids, err := query.SelectIntegers(c.tx, stmt, name)
+	if err != nil {
+		return -1, err
+	}
+	switch len(ids) {
+	case 0:
+		return -1, NoSuchObjectError
+	case 1:
+		return int64(ids[0]), nil
+	default:
+		return -1, fmt.Errorf("more than one network has the given name")
+	}
+}
+
 // NetworkConfigAdd adds a new entry in the networks_config table
 func (c *ClusterTx) NetworkConfigAdd(networkID, nodeID int64, config map[string]string) error {
 	return networkConfigAdd(c.tx, networkID, nodeID, config)
@@ -72,6 +90,128 @@ func (c *ClusterTx) NetworkNodeJoin(networkID, nodeID int64) error {
 	return err
 }
 
+// NetworkNodeConfigs returns the node-specific configuration of all
+// nodes grouped by node name, for the given networkID.
+//
+// If the network is not defined on all nodes, an error is returned.
+func (c *ClusterTx) NetworkNodeConfigs(networkID int64) (map[string]map[string]string, error) {
+	// Fetch all nodes.
+	nodes, err := c.Nodes()
+	if err != nil {
+		return nil, err
+	}
+
+	// Fetch the names of the nodes where the given network is defined.
+	stmt := `
+SELECT nodes.name FROM nodes
+  LEFT JOIN networks_nodes ON networks_nodes.node_id = nodes.id
+  LEFT JOIN networks ON networks_nodes.network_id = networks.id
+WHERE networks.id = ? AND networks.state = ?
+`
+	defined, err := query.SelectStrings(c.tx, stmt, networkID, networkPending)
+	if err != nil {
+		return nil, err
+	}
+
+	// Figure which nodes are missing
+	missing := []string{}
+	for _, node := range nodes {
+		if !shared.StringInSlice(node.Name, defined) {
+			missing = append(missing, node.Name)
+		}
+	}
+
+	if len(missing) > 0 {
+		return nil, fmt.Errorf("Network not defined on nodes: %s", strings.Join(missing, ", "))
+	}
+
+	configs := map[string]map[string]string{}
+	for _, node := range nodes {
+		config, err := query.SelectConfig(c.tx, "networks_config", "network_id=? AND node_id=?", networkID, node.ID)
+		if err != nil {
+			return nil, err
+		}
+		configs[node.Name] = config
+	}
+
+	return configs, nil
+}
+
+// NetworkCreatePending creates a new pending network on the node with
+// the given name.
+func (c *ClusterTx) NetworkCreatePending(node, name string, conf map[string]string) error {
+	// First check if a network with the given name exists, and, if
+	// so, that it's in the pending state.
+	network := struct {
+		id    int64
+		state int
+	}{}
+
+	var errConsistency error
+	dest := func(i int) []interface{} {
+		// Sanity check that there is at most one network with the given name.
+		if i != 0 {
+			errConsistency = fmt.Errorf("more than one network exists with the given name")
+		}
+		return []interface{}{&network.id, &network.state}
+	}
+	stmt := "SELECT id, state FROM networks WHERE name=?"
+	err := query.SelectObjects(c.tx, dest, stmt, name)
+	if err != nil {
+		return err
+	}
+	if errConsistency != nil {
+		return errConsistency
+	}
+
+	var networkID = network.id
+	if networkID == 0 {
+		// No existing network with the given name was found, let's create
+		// one.
+		columns := []string{"name"}
+		values := []interface{}{name}
+		networkID, err = query.UpsertObject(c.tx, "networks", columns, values)
+		if err != nil {
+			return err
+		}
+	} else {
+		// Check that the existing network is in the pending state.
+		if network.state != networkPending {
+			return fmt.Errorf("network is not in pending state")
+		}
+	}
+
+	// Get the ID of the node with the given name.
+	nodeInfo, err := c.NodeByName(node)
+	if err != nil {
+		return err
+	}
+
+	// Check that no network entry for this node and network exists yet.
+	count, err := query.Count(
+		c.tx, "networks_nodes", "network_id=? AND node_id=?", networkID, nodeInfo.ID)
+	if err != nil {
+		return err
+	}
+	if count != 0 {
+		return DbErrAlreadyDefined
+	}
+
+	// Insert the node-specific configuration.
+	columns := []string{"network_id", "node_id"}
+	values := []interface{}{networkID, nodeInfo.ID}
+	_, err = query.UpsertObject(c.tx, "networks_nodes", columns, values)
+	if err != nil {
+		return err
+	}
+	err = c.NetworkConfigAdd(networkID, nodeInfo.ID, conf)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
 func (c *Cluster) Networks() ([]string, error) {
 	q := fmt.Sprintf("SELECT name FROM networks")
 	inargs := []interface{}{}
@@ -253,7 +393,7 @@ func (c *Cluster) NetworkCreate(name, description string, config map[string]stri
 		return -1, err
 	}
 
-	result, err := tx.Exec("INSERT INTO networks (name, description) VALUES (?, ?)", name, description)
+	result, err := tx.Exec("INSERT INTO networks (name, description, state) VALUES (?, ?, ?)", name, description, networkCreated)
 	if err != nil {
 		tx.Rollback()
 		return -1, err
diff --git a/lxd/db/networks_test.go b/lxd/db/networks_test.go
new file mode 100644
index 000000000..81d7b5d95
--- /dev/null
+++ b/lxd/db/networks_test.go
@@ -0,0 +1,72 @@
+package db_test
+
+import (
+	"testing"
+
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestNetworkCreatePending(t *testing.T) {
+	tx, cleanup := db.NewTestClusterTx(t)
+	defer cleanup()
+
+	_, err := tx.NodeAdd("buzz", "1.2.3.4:666")
+	require.NoError(t, err)
+	_, err = tx.NodeAdd("rusp", "5.6.7.8:666")
+	require.NoError(t, err)
+
+	config := map[string]string{"bridge.external_interfaces": "foo"}
+	err = tx.NetworkCreatePending("buzz", "network1", config)
+	require.NoError(t, err)
+
+	networkID, err := tx.NetworkID("network1")
+	require.NoError(t, err)
+	assert.True(t, networkID > 0)
+
+	config = map[string]string{"bridge.external_interfaces": "bar"}
+	err = tx.NetworkCreatePending("rusp", "network1", config)
+	require.NoError(t, err)
+
+	// The initial node (whose name is 'none' by default) is missing.
+	_, err = tx.NetworkNodeConfigs(networkID)
+	require.EqualError(t, err, "Network not defined on nodes: none")
+
+	config = map[string]string{"bridge.external_interfaces": "egg"}
+	err = tx.NetworkCreatePending("none", "network1", config)
+	require.NoError(t, err)
+
+	// Now the network is defined on all nodes.
+	configs, err := tx.NetworkNodeConfigs(networkID)
+	require.NoError(t, err)
+	assert.Len(t, configs, 3)
+	assert.Equal(t, map[string]string{"bridge.external_interfaces": "foo"}, configs["buzz"])
+	assert.Equal(t, map[string]string{"bridge.external_interfaces": "bar"}, configs["rusp"])
+	assert.Equal(t, map[string]string{"bridge.external_interfaces": "egg"}, configs["none"])
+}
+
+// If an entry for the given network and node already exists, an error is
+// returned.
+func TestNetworksCreatePending_AlreadyDefined(t *testing.T) {
+	tx, cleanup := db.NewTestClusterTx(t)
+	defer cleanup()
+
+	_, err := tx.NodeAdd("buzz", "1.2.3.4:666")
+	require.NoError(t, err)
+
+	err = tx.NetworkCreatePending("buzz", "network1", map[string]string{})
+	require.NoError(t, err)
+
+	err = tx.NetworkCreatePending("buzz", "network1", map[string]string{})
+	require.Equal(t, db.DbErrAlreadyDefined, err)
+}
+
+// If no node with the given name is found, an error is returned.
+func TestNetworksCreatePending_NonExistingNode(t *testing.T) {
+	tx, cleanup := db.NewTestClusterTx(t)
+	defer cleanup()
+
+	err := tx.NetworkCreatePending("buzz", "network1", map[string]string{})
+	require.Equal(t, db.NoSuchObjectError, err)
+}

From e0d95e9aafb2849c25cffa8b03c1beb9414a363a Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 14 Dec 2017 08:41:41 +0000
Subject: [PATCH 126/227] Handle the targetNode parameter when POST'ing to
 /1.0/networks

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
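Note: client-side usage sketch (hypothetical node name; not part of
the patch):

    // Define "mynetwork" as pending on node rusp-0 only; nothing is
    // created on disk yet. On the wire this becomes
    // POST /1.0/networks?targetNode=rusp-0.
    c := client.ClusterTargetNode("rusp-0")
    err := c.CreateNetwork(api.NetworksPost{Name: "mynetwork"})
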
 client/lxd_networks.go   |  6 +++++-
 lxd/api_networks_test.go | 37 +++++++++++++++++++++++++++++++++++++
 lxd/networks.go          | 29 ++++++++++++++++++++++++++++-
 3 files changed, 70 insertions(+), 2 deletions(-)
 create mode 100644 lxd/api_networks_test.go

diff --git a/client/lxd_networks.go b/client/lxd_networks.go
index 84ea4703c..b3a5fbc45 100644
--- a/client/lxd_networks.go
+++ b/client/lxd_networks.go
@@ -61,7 +61,11 @@ func (r *ProtocolLXD) CreateNetwork(network api.NetworksPost) error {
 	}
 
 	// Send the request
-	_, _, err := r.query("POST", "/networks", network, "")
+	path := "/networks"
+	if r.targetNode != "" {
+		path += fmt.Sprintf("?targetNode=%s", r.targetNode)
+	}
+	_, _, err := r.query("POST", path, network, "")
 	if err != nil {
 		return err
 	}
diff --git a/lxd/api_networks_test.go b/lxd/api_networks_test.go
new file mode 100644
index 000000000..dc51c1c23
--- /dev/null
+++ b/lxd/api_networks_test.go
@@ -0,0 +1,37 @@
+package main
+
+import (
+	"testing"
+
+	"github.com/lxc/lxd/shared/api"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// Create a new pending network using the targetNode query parameter.
+func TestNetworksCreate_TargetNode(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping networks targetNode test in short mode.")
+	}
+	daemons, cleanup := newDaemons(t, 2)
+	defer cleanup()
+
+	f := clusterFixture{t: t}
+	f.FormCluster(daemons)
+
+	daemon := daemons[0]
+	client := f.ClientUnix(daemon).ClusterTargetNode("rusp-0")
+
+	networkPost := api.NetworksPost{
+		Name: "mynetwork",
+	}
+
+	err := client.CreateNetwork(networkPost)
+	require.NoError(t, err)
+
+	network, _, err := client.GetNetwork("mynetwork")
+	require.NoError(t, err)
+
+	assert.Equal(t, "PENDING", network.State)
+	assert.Equal(t, []string{"rusp-0"}, network.Nodes)
+}
diff --git a/lxd/networks.go b/lxd/networks.go
index f0b120641..3674726d0 100644
--- a/lxd/networks.go
+++ b/lxd/networks.go
@@ -103,6 +103,28 @@ func networksPost(d *Daemon, r *http.Request) Response {
 		req.Config = map[string]string{}
 	}
 
+	url := fmt.Sprintf("/%s/networks/%s", version.APIVersion, req.Name)
+	response := SyncResponseLocation(true, nil, url)
+
+	targetNode := r.FormValue("targetNode")
+	if targetNode != "" {
+		// A targetNode was specified, let's just define the node's
+		// network without actually creating it. The only legal config
+		// key at this stage is 'bridge.external_interfaces'.
+		for key := range req.Config {
+			if key != "bridge.external_interfaces" {
+				return SmartError(fmt.Errorf("Invalid config key '%s'", key))
+			}
+		}
+		err = d.cluster.Transaction(func(tx *db.ClusterTx) error {
+			return tx.NetworkCreatePending(targetNode, req.Name, req.Config)
+		})
+		if err != nil {
+			return SmartError(err)
+		}
+		return response
+	}
+
 	err = networkValidateConfig(req.Name, req.Config)
 	if err != nil {
 		return BadRequest(err)
@@ -157,7 +179,7 @@ func networksPost(d *Daemon, r *http.Request) Response {
 		return InternalError(err)
 	}
 
-	return SyncResponseLocation(true, nil, fmt.Sprintf("/%s/networks/%s", version.APIVersion, req.Name))
+	return response
 }
 
 var networksCmd = Command{name: "networks", get: networksGet, post: networksPost}
@@ -234,6 +256,11 @@ func doNetworkGet(d *Daemon, name string) (api.Network, error) {
 		}
 	}
 
+	if dbInfo != nil {
+		n.State = dbInfo.State
+		n.Nodes = dbInfo.Nodes
+	}
+
 	return n, nil
 }
 

From 41e829895fa96ff4e046d9405e9e8751caedb085 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 14 Dec 2017 11:59:26 +0000
Subject: [PATCH 127/227] Support --target option in lxc network create

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
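Note: the workflow this exposes, end to end (summary with hypothetical
names; the final no-target creation step is implemented later in this
series):

    // lxc network create net1 --target node1  -> net1 PENDING on node1
    // lxc network create net1 --target node2  -> net1 PENDING on node2
    // lxc network create net1 --target node2  -> error: already defined
    // lxc network create net1                 -> actual creation on all nodes
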
 lxc/network.go            | 35 ++++++++++++++++++++++++++------
 lxd/db/networks.go        |  2 +-
 lxd/networks.go           | 26 ++++++++++++++----------
 test/main.sh              |  1 +
 test/suites/clustering.sh | 51 +++++++++++++++++++++++++++++++++++++++++++++++
 5 files changed, 97 insertions(+), 18 deletions(-)

diff --git a/lxc/network.go b/lxc/network.go
index 3ba7c0ca1..d2c77e587 100644
--- a/lxc/network.go
+++ b/lxc/network.go
@@ -15,11 +15,13 @@ import (
 	"github.com/lxc/lxd/lxc/config"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
+	"github.com/lxc/lxd/shared/gnuflag"
 	"github.com/lxc/lxd/shared/i18n"
 	"github.com/lxc/lxd/shared/termios"
 )
 
 type networkCmd struct {
+	target string
 }
 
 func (c *networkCmd) showByDefault() bool {
@@ -58,7 +60,7 @@ lxc network list [<remote>:]
 lxc network show [<remote>:]<network>
     Show details of a network.
 
-lxc network create [<remote>:]<network> [key=value...]
+lxc network create [<remote>:]<network> [key=value...] [--target <node>]
     Create a network.
 
 lxc network get [<remote>:]<network> <key>
@@ -96,7 +98,9 @@ cat network.yaml | lxc network edit <network>
     Update a network using the content of network.yaml`)
 }
 
-func (c *networkCmd) flags() {}
+func (c *networkCmd) flags() {
+	gnuflag.StringVar(&c.target, "target", "", i18n.G("Node name"))
+}
 
 func (c *networkCmd) run(conf *config.Config, args []string) error {
 	if len(args) < 1 {
@@ -250,12 +254,22 @@ func (c *networkCmd) doNetworkCreate(client lxd.ContainerServer, name string, ar
 		network.Config[entry[0]] = entry[1]
 	}
 
+	// If a target node was specified the API won't actually create the
+	// network, but only define it as pending in the database.
+	if c.target != "" {
+		client = client.ClusterTargetNode(c.target)
+	}
+
 	err := client.CreateNetwork(network)
 	if err != nil {
 		return err
 	}
 
-	fmt.Printf(i18n.G("Network %s created")+"\n", name)
+	if c.target != "" {
+		fmt.Printf(i18n.G("Network %s pending on node %s")+"\n", name, c.target)
+	} else {
+		fmt.Printf(i18n.G("Network %s created")+"\n", name)
+	}
 	return nil
 }
 
@@ -511,19 +525,28 @@ func (c *networkCmd) doNetworkList(conf *config.Config, args []string) error {
 		}
 
 		strUsedBy := fmt.Sprintf("%d", len(network.UsedBy))
-		data = append(data, []string{network.Name, network.Type, strManaged, network.Description, strUsedBy})
+		details := []string{network.Name, network.Type, strManaged, network.Description, strUsedBy}
+		if client.IsClustered() {
+			details = append(details, network.State)
+		}
+		data = append(data, details)
 	}
 
 	table := tablewriter.NewWriter(os.Stdout)
 	table.SetAutoWrapText(false)
 	table.SetAlignment(tablewriter.ALIGN_LEFT)
 	table.SetRowLine(true)
-	table.SetHeader([]string{
+	header := []string{
 		i18n.G("NAME"),
 		i18n.G("TYPE"),
 		i18n.G("MANAGED"),
 		i18n.G("DESCRIPTION"),
-		i18n.G("USED BY")})
+		i18n.G("USED BY"),
+	}
+	if client.IsClustered() {
+		header = append(header, i18n.G("STATE"))
+	}
+	table.SetHeader(header)
 	sort.Sort(byName(data))
 	table.AppendBulk(data)
 	table.Render()
diff --git a/lxd/db/networks.go b/lxd/db/networks.go
index bb232980f..2039c9f6d 100644
--- a/lxd/db/networks.go
+++ b/lxd/db/networks.go
@@ -213,7 +213,7 @@ func (c *ClusterTx) NetworkCreatePending(node, name string, conf map[string]stri
 }
 
 func (c *Cluster) Networks() ([]string, error) {
-	q := fmt.Sprintf("SELECT name FROM networks")
+	q := "SELECT name FROM networks"
 	inargs := []interface{}{}
 	var name string
 	outfmt := []interface{}{name}
diff --git a/lxd/networks.go b/lxd/networks.go
index 3674726d0..71cd99190 100644
--- a/lxd/networks.go
+++ b/lxd/networks.go
@@ -90,19 +90,15 @@ func networksPost(d *Daemon, r *http.Request) Response {
 		return BadRequest(fmt.Errorf("Only 'bridge' type networks can be created"))
 	}
 
-	networks, err := networkGetInterfaces(d.cluster)
-	if err != nil {
-		return InternalError(err)
-	}
-
-	if shared.StringInSlice(req.Name, networks) {
-		return BadRequest(fmt.Errorf("The network already exists"))
-	}
-
 	if req.Config == nil {
 		req.Config = map[string]string{}
 	}
 
+	err = networkValidateConfig(req.Name, req.Config)
+	if err != nil {
+		return BadRequest(err)
+	}
+
 	url := fmt.Sprintf("/%s/networks/%s", version.APIVersion, req.Name)
 	response := SyncResponseLocation(true, nil, url)
 
@@ -120,14 +116,22 @@ func networksPost(d *Daemon, r *http.Request) Response {
 			return tx.NetworkCreatePending(targetNode, req.Name, req.Config)
 		})
 		if err != nil {
+			if err == db.DbErrAlreadyDefined {
+				return BadRequest(
+					fmt.Errorf("The network is already defined on node %s", targetNode))
+			}
 			return SmartError(err)
 		}
 		return response
 	}
 
-	err = networkValidateConfig(req.Name, req.Config)
+	networks, err := networkGetInterfaces(d.cluster)
 	if err != nil {
-		return BadRequest(err)
+		return InternalError(err)
+	}
+
+	if shared.StringInSlice(req.Name, networks) {
+		return BadRequest(fmt.Errorf("The network already exists"))
 	}
 
 	// Set some default values where needed
diff --git a/test/main.sh b/test/main.sh
index 1bfe44958..246d2229e 100755
--- a/test/main.sh
+++ b/test/main.sh
@@ -197,6 +197,7 @@ run_test test_proxy_device "proxy device"
 run_test test_clustering_membership "clustering membership"
 run_test test_clustering_containers "clustering containers"
 run_test test_clustering_storage "clustering storage"
+run_test test_clustering_network "clustering network"
 
 # shellcheck disable=SC2034
 TEST_RESULT=success
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index e3677fda0..68f741450 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -241,3 +241,54 @@ test_clustering_storage() {
   teardown_clustering_netns
   teardown_clustering_bridge
 }
+
+test_clustering_network() {
+  setup_clustering_bridge
+  prefix="lxd$$"
+  bridge="${prefix}"
+
+  setup_clustering_netns 1
+  LXD_ONE_DIR=$(mktemp -d -p "${TEST_DIR}" XXX)
+  chmod +x "${LXD_ONE_DIR}"
+  ns1="${prefix}1"
+  spawn_lxd_and_bootstrap_cluster "${ns1}" "${bridge}" "${LXD_ONE_DIR}"
+
+  # The state of the preseeded network shows up as CREATED
+  LXD_DIR="${LXD_ONE_DIR}" lxc network list | grep "${bridge}" | grep -q CREATED
+
+  # Add a newline at the end of each line. YAML has weird rules.
+  cert=$(sed ':a;N;$!ba;s/\n/\n\n/g' "${LXD_ONE_DIR}/server.crt")
+
+  # Spawn a second node
+  setup_clustering_netns 2
+  LXD_TWO_DIR=$(mktemp -d -p "${TEST_DIR}" XXX)
+  chmod +x "${LXD_TWO_DIR}"
+  ns2="${prefix}2"
+  spawn_lxd_and_join_cluster "${ns2}" "${bridge}" "${cert}" 2 1 "${LXD_TWO_DIR}"
+
+  # The state of the preseeded network is still CREATED
+  LXD_DIR="${LXD_ONE_DIR}" lxc network list| grep "${bridge}" | grep -q CREATED
+
+  # Trying to pass config values other than
+  # 'bridge.external_interfaces' results in an error
+  ! LXD_DIR="${LXD_ONE_DIR}" lxc network create foo ipv4.address=auto --target node1
+
+  net="${bridge}x"
+
+  # Define networks on the two nodes
+  LXD_DIR="${LXD_ONE_DIR}" lxc network create "${net}" --target node1
+  LXD_DIR="${LXD_TWO_DIR}" lxc network show "${net}" | grep -q node1
+  ! LXD_DIR="${LXD_TWO_DIR}" lxc network show "${net}" | grep -q node2
+  LXD_DIR="${LXD_ONE_DIR}" lxc network create "${net}" --target node2
+  ! LXD_DIR="${LXD_ONE_DIR}" lxc network create "${net}" --target node2
+  LXD_DIR="${LXD_ONE_DIR}" lxc network show "${net}" | grep state: | grep -q PENDING
+
+  LXD_DIR="${LXD_TWO_DIR}" lxd shutdown
+  LXD_DIR="${LXD_ONE_DIR}" lxd shutdown
+  sleep 2
+  rm -f "${LXD_TWO_DIR}/unix.socket"
+  rm -f "${LXD_ONE_DIR}/unix.socket"
+
+  teardown_clustering_netns
+  teardown_clustering_bridge
+}

From 19485e98eeb6a702635e67572f7443431118bd56 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 14 Dec 2017 12:40:59 +0000
Subject: [PATCH 128/227] Handle creating duplicate pending storage pools on
 the same node

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
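Note: this mirrors the handling added for networks in the previous
patch. Expected behavior (sketch):

    // lxc storage create pool1 dir --target node1  -> OK, pool1 pending
    // lxc storage create pool1 dir --target node1  -> 400, already defined
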
 lxd/storage_pools.go | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/lxd/storage_pools.go b/lxd/storage_pools.go
index 2d6479ab6..87b4feafc 100644
--- a/lxd/storage_pools.go
+++ b/lxd/storage_pools.go
@@ -142,6 +142,10 @@ func storagePoolsPost(d *Daemon, r *http.Request) Response {
 		return tx.StoragePoolCreatePending(targetNode, req.Name, req.Driver, req.Config)
 	})
 	if err != nil {
+		if err == db.DbErrAlreadyDefined {
+			return BadRequest(
+				fmt.Errorf("The storage pool is already defined on node %s", targetNode))
+		}
 		return SmartError(err)
 	}
 

From 28b7a1003c5386a7f39d6e96a5e7d1e003ad5387 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 14 Dec 2017 13:11:58 +0000
Subject: [PATCH 129/227] Extract non-db network create logic

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
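Note: the refactor splits networksPost into pieces that the clustered
code path (implemented in a later patch; here it still panics with a
TODO) can reuse. Rough division of labor (sketch):

    // networkFillConfig(&req)   -> apply defaults, expand "auto" values
    // d.cluster.NetworkCreate() -> insert the database entry
    // doNetworksCreate(d, req)  -> load the network object and start
    //                              the bridge on this node
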
 lxd/networks.go | 61 ++++++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 45 insertions(+), 16 deletions(-)

diff --git a/lxd/networks.go b/lxd/networks.go
index 71cd99190..ffbcedbd7 100644
--- a/lxd/networks.go
+++ b/lxd/networks.go
@@ -125,6 +125,24 @@ func networksPost(d *Daemon, r *http.Request) Response {
 		return response
 	}
 
+	err = networkFillConfig(&req)
+	if err != nil {
+		return SmartError(err)
+	}
+
+	// Check if we're clustered
+	count, err := cluster.Count(d.State())
+	if err != nil {
+		return SmartError(err)
+	}
+
+	if count > 1 {
+		panic("TODO")
+	}
+
+	// No targetNode was specified and we're either a single-node
+	// cluster or not clustered at all, so create the network
+	// immediately.
 	networks, err := networkGetInterfaces(d.cluster)
 	if err != nil {
 		return InternalError(err)
@@ -134,6 +152,21 @@ func networksPost(d *Daemon, r *http.Request) Response {
 		return BadRequest(fmt.Errorf("The network already exists"))
 	}
 
+	// Create the database entry
+	_, err = d.cluster.NetworkCreate(req.Name, req.Description, req.Config)
+	if err != nil {
+		return SmartError(fmt.Errorf("Error inserting %s into database: %s", req.Name, err))
+	}
+
+	err = doNetworksCreate(d, req)
+	if err != nil {
+		return SmartError(err)
+	}
+
+	return response
+}
+
+func networkFillConfig(req *api.NetworksPost) error {
 	// Set some default values where needed
 	if req.Config["bridge.mode"] == "fan" {
 		if req.Config["fan.underlay_subnet"] == "" {
@@ -159,31 +192,27 @@ func networksPost(d *Daemon, r *http.Request) Response {
 	}
 
 	// Replace "auto" by actual values
-	err = networkFillAuto(req.Config)
-	if err != nil {
-		return InternalError(err)
-	}
-
-	// Create the database entry
-	_, err = d.cluster.NetworkCreate(req.Name, req.Description, req.Config)
+	err := networkFillAuto(req.Config)
 	if err != nil {
-		return InternalError(
-			fmt.Errorf("Error inserting %s into database: %s", req.Name, err))
+		return err
 	}
+	return nil
+}
 
+func doNetworksCreate(d *Daemon, req api.NetworksPost) error {
 	// Start the network
 	n, err := networkLoadByName(d.State(), req.Name)
 	if err != nil {
-		return InternalError(err)
+		return err
 	}
 
 	err = n.Start()
 	if err != nil {
 		n.Delete()
-		return InternalError(err)
+		return err
 	}
 
-	return response
+	return nil
 }
 
 var networksCmd = Command{name: "networks", get: networksGet, post: networksPost}
@@ -278,7 +307,7 @@ func networkDelete(d *Daemon, r *http.Request) Response {
 		return NotFound
 	}
 	if isClusterNotification(r) {
-		n.db = nil // We just want to delete the network from the system
+		n.state = nil // We just want to delete the network from the system
 	} else {
 		// Sanity checks
 		if n.IsUsed() {
@@ -287,7 +316,7 @@ func networkDelete(d *Daemon, r *http.Request) Response {
 	}
 
 	// If we're just handling a notification, we're done.
-	if n.db == nil {
+	if n.state == nil {
 		return EmptySyncResponse
 	}
 
@@ -466,7 +495,7 @@ func networkLoadByName(s *state.State, name string) (*network, error) {
 		return nil, err
 	}
 
-	n := network{db: s.Node, state: s, id: id, name: name, description: dbInfo.Description, config: dbInfo.Config}
+	n := network{state: s, id: id, name: name, description: dbInfo.Description, config: dbInfo.Config}
 
 	return &n, nil
 }
@@ -570,7 +599,7 @@ func (n *network) Delete() error {
 		if err != nil {
 			return err
 		}
-		if n.db == nil {
+		if n.state == nil {
 			return nil
 		}
 	}

From 1e37fd2459033c25b26cb7fe8272e23e8611cab9 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 15 Dec 2017 12:50:10 +0000
Subject: [PATCH 130/227] Use lxc-execute instead of unshare to isolate LXD
 nodes

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
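Note: besides the switch to lxc-execute, this patch also fills in the
networksPostCluster path that previously panicked with a TODO. Roughly
(sketch):

    // 1. Check the network is pending on every node and collect the
    //    per-node configs; insert the global config keys.
    // 2. Create the network locally, merging in this node's config.
    // 3. Notify all other nodes; on the receiving end
    //    isClusterNotification(r) routes straight into doNetworksCreate.
    // 4. Mark the network CREATED, or ERRORED if any node failed.
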
 lxd/api_networks_test.go    | 33 +++++++++++++++
 lxd/db/networks.go          | 26 ++++++++++++
 lxd/networks.go             | 99 ++++++++++++++++++++++++++++++++++++++++++++-
 test/includes/clustering.sh | 81 ++++++++++++++++++++++++++++++++++---
 test/includes/lxd.sh        |  3 +-
 test/suites/clustering.sh   |  7 ++++
 6 files changed, 241 insertions(+), 8 deletions(-)

diff --git a/lxd/api_networks_test.go b/lxd/api_networks_test.go
index dc51c1c23..90e71bb15 100644
--- a/lxd/api_networks_test.go
+++ b/lxd/api_networks_test.go
@@ -35,3 +35,36 @@ func TestNetworksCreate_TargetNode(t *testing.T) {
 	assert.Equal(t, "PENDING", network.State)
 	assert.Equal(t, []string{"rusp-0"}, network.Nodes)
 }
+
+// An error is returned when trying to create a new network in a cluster where
+// the network was not defined on all nodes.
+func TestNetworksCreate_MissingNodes(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping networks targetNode test in short mode.")
+	}
+	daemons, cleanup := newDaemons(t, 2)
+	defer cleanup()
+
+	f := clusterFixture{t: t}
+	f.FormCluster(daemons)
+
+	// Define the network on rusp-0.
+	daemon := daemons[0]
+	client := f.ClientUnix(daemon).ClusterTargetNode("rusp-0")
+
+	networkPost := api.NetworksPost{
+		Name: "mynetwork",
+	}
+
+	err := client.CreateNetwork(networkPost)
+	require.NoError(t, err)
+
+	// Trying to create the network now results in an error, since it's not
+	// defined on all nodes.
+	networkPost = api.NetworksPost{
+		Name: "mynetwork",
+	}
+	client = f.ClientUnix(daemon)
+	err = client.CreateNetwork(networkPost)
+	require.EqualError(t, err, "Network not defined on nodes: buzz")
+}
diff --git a/lxd/db/networks.go b/lxd/db/networks.go
index 2039c9f6d..16177da78 100644
--- a/lxd/db/networks.go
+++ b/lxd/db/networks.go
@@ -212,6 +212,32 @@ func (c *ClusterTx) NetworkCreatePending(node, name string, conf map[string]stri
 	return nil
 }
 
+// NetworkCreated sets the state of the given network to "CREATED".
+func (c *ClusterTx) NetworkCreated(name string) error {
+	return c.networkState(name, networkCreated)
+}
+
+// NetworkErrored sets the state of the given network to "ERRORED".
+func (c *ClusterTx) NetworkErrored(name string) error {
+	return c.networkState(name, networkErrored)
+}
+
+func (c *ClusterTx) networkState(name string, state int) error {
+	stmt := "UPDATE networks SET state=? WHERE name=?"
+	result, err := c.tx.Exec(stmt, state, name)
+	if err != nil {
+		return err
+	}
+	n, err := result.RowsAffected()
+	if err != nil {
+		return err
+	}
+	if n != 1 {
+		return NoSuchObjectError
+	}
+	return nil
+}
+
 func (c *Cluster) Networks() ([]string, error) {
 	q := "SELECT name FROM networks"
 	inargs := []interface{}{}
diff --git a/lxd/networks.go b/lxd/networks.go
index ffbcedbd7..5ddd40656 100644
--- a/lxd/networks.go
+++ b/lxd/networks.go
@@ -102,6 +102,17 @@ func networksPost(d *Daemon, r *http.Request) Response {
 	url := fmt.Sprintf("/%s/networks/%s", version.APIVersion, req.Name)
 	response := SyncResponseLocation(true, nil, url)
 
+	if isClusterNotification(r) {
+		// This is an internal request which triggers the actual
+		// creation of the network across all nodes, after they have
+		// been previously defined.
+		err = doNetworksCreate(d, req)
+		if err != nil {
+			return SmartError(err)
+		}
+		return response
+	}
+
 	targetNode := r.FormValue("targetNode")
 	if targetNode != "" {
 		// A targetNode was specified, let's just define the node's
@@ -137,7 +148,11 @@ func networksPost(d *Daemon, r *http.Request) Response {
 	}
 
 	if count > 1 {
-		panic("TODO")
+		err = networksPostCluster(d, req)
+		if err != nil {
+			return SmartError(err)
+		}
+		return response
 	}
 
 	// No targetNode was specified and we're either a single-node
@@ -166,6 +181,88 @@ func networksPost(d *Daemon, r *http.Request) Response {
 	return response
 }
 
+func networksPostCluster(d *Daemon, req api.NetworksPost) error {
+	// Check that no 'bridge.external_interfaces' config key has been
+	// defined, since that's node-specific.
+	for key := range req.Config {
+		if key == "bridge.external_interfaces" {
+			return fmt.Errorf("Config key 'bridge.external_interfaces' is node-specific")
+		}
+	}
+
+	// Check that the network is properly defined, fetch the node-specific
+	// configs and insert the global config.
+	var configs map[string]map[string]string
+	var nodeName string
+	err := d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		// Check that the network was defined at all.
+		networkID, err := tx.NetworkID(req.Name)
+		if err != nil {
+			return err
+		}
+
+		// Fetch the node-specific configs.
+		configs, err = tx.NetworkNodeConfigs(networkID)
+		if err != nil {
+			return err
+		}
+
+		// Take note of the name of this node
+		nodeName, err = tx.NodeName()
+		if err != nil {
+			return err
+		}
+
+		// Insert the global config keys.
+		return tx.NetworkConfigAdd(networkID, 0, req.Config)
+	})
+	if err != nil {
+		return err
+	}
+
+	// Create the network on this node.
+	nodeReq := req
+	for key, value := range configs[nodeName] {
+		nodeReq.Config[key] = value
+	}
+	err = doNetworksCreate(d, nodeReq)
+	if err != nil {
+		return err
+	}
+
+	// Notify all other nodes to create the network.
+	notifier, err := cluster.NewNotifier(d.State(), d.endpoints.NetworkCert(), cluster.NotifyAll)
+	if err != nil {
+		return err
+	}
+	notifyErr := notifier(func(client lxd.ContainerServer) error {
+		_, _, err := client.GetServer()
+		if err != nil {
+			return err
+		}
+		nodeReq := req
+		for key, value := range configs[client.ClusterNodeName()] {
+			nodeReq.Config[key] = value
+		}
+		return client.CreateNetwork(nodeReq)
+	})
+
+	errored := notifyErr != nil
+
+	// Finally, update the network state.
+	err = d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		if errored {
+			return tx.NetworkErrored(req.Name)
+		}
+		return tx.NetworkCreated(req.Name)
+	})
+	if err != nil {
+		return err
+	}
+
+	return notifyErr
+}
+
 func networkFillConfig(req *api.NetworksPost) error {
 	// Set some default values where needed
 	if req.Config["bridge.mode"] == "fan" {
diff --git a/test/includes/clustering.sh b/test/includes/clustering.sh
index 0695ba9ab..f2502b6e4 100644
--- a/test/includes/clustering.sh
+++ b/test/includes/clustering.sh
@@ -32,13 +32,81 @@ setup_clustering_netns() {
 
   prefix="lxd$$"
   ns="${prefix}${id}"
+  rcfile="${TEST_DIR}/${ns}.conf"
 
   echo "==> Setup clustering netns ${ns}"
 
+  cat > "${rcfile}" <<EOF
+lxc.mount.entry = cgroup                 sys/fs/cgroup                  tmpfs   rw,nosuid,nodev,noexec,mode=755,create=dir                                   0 0
+lxc.mount.entry = cgroup2                sys/fs/cgroup/unified          cgroup2 rw,nosuid,nodev,noexec,relatime,create=dir                                   0 0
+lxc.mount.entry = name=systemd           sys/fs/cgroup/systemd          cgroup  rw,nosuid,nodev,noexec,relatime,xattr,clone_children,name=systemd,create=dir 0 0
+lxc.mount.entry = net_cls,net_prio       sys/fs/cgroup/net_cls,net_prio cgroup  rw,nosuid,nodev,noexec,relatime,net_cls,net_prio,clone_children,create=dir   0 0
+lxc.mount.entry = cpuset                 sys/fs/cgroup/cpuset           cgroup  rw,nosuid,nodev,noexec,relatime,cpuset,clone_children,create=dir             0 0
+lxc.mount.entry = hugetlb                sys/fs/cgroup/hugetlb          cgroup  rw,nosuid,nodev,noexec,relatime,hugetlb,clone_children,create=dir            0 0
+lxc.mount.entry = blkio                  sys/fs/cgroup/blkio            cgroup  rw,nosuid,nodev,noexec,relatime,blkio,clone_children,create=dir              0 0
+lxc.mount.entry = cpu,cpuacct            sys/fs/cgroup/cpu,cpuacct      cgroup  rw,nosuid,nodev,noexec,relatime,cpu,cpuacct,clone_children,create=dir        0 0
+lxc.mount.entry = pids                   sys/fs/cgroup/pids             cgroup  rw,nosuid,nodev,noexec,relatime,pids,clone_children,create=dir               0 0
+lxc.mount.entry = rdma                   sys/fs/cgroup/rdma             cgroup  rw,nosuid,nodev,noexec,relatime,rdma,clone_children,create=dir               0 0
+lxc.mount.entry = perf_event             sys/fs/cgroup/perf_event       cgroup  rw,nosuid,nodev,noexec,relatime,perf_event,clone_children,create=dir         0 0
+lxc.mount.entry = memory                 sys/fs/cgroup/memory           cgroup  rw,nosuid,nodev,noexec,relatime,memory,clone_children,create=dir             0 0
+lxc.mount.entry = freezer                sys/fs/cgroup/freezer          cgroup  rw,nosuid,nodev,noexec,relatime,freezer,clone_children,create=dir            0 0
+lxc.mount.entry = /sys/fs/cgroup/devices sys/fs/cgroup/devices          none    bind,create=dir 0 0
+
+# CGroup whitelist
+lxc.cgroup.devices.deny = a
+## Allow any mknod (but not reading/writing the node)
+lxc.cgroup.devices.allow = c *:* m
+lxc.cgroup.devices.allow = b *:* m
+## Allow specific devices
+### /dev/null
+lxc.cgroup.devices.allow = c 1:3 rwm
+### /dev/zero
+lxc.cgroup.devices.allow = c 1:5 rwm
+### /dev/full
+lxc.cgroup.devices.allow = c 1:7 rwm
+### /dev/tty
+lxc.cgroup.devices.allow = c 5:0 rwm
+### /dev/console
+lxc.cgroup.devices.allow = c 5:1 rwm
+### /dev/ptmx
+lxc.cgroup.devices.allow = c 5:2 rwm
+### /dev/random
+lxc.cgroup.devices.allow = c 1:8 rwm
+### /dev/urandom
+lxc.cgroup.devices.allow = c 1:9 rwm
+### /dev/pts/*
+lxc.cgroup.devices.allow = c 136:* rwm
+### fuse
+lxc.cgroup.devices.allow = c 10:229 rwm
+### loop
+lxc.cgroup.devices.allow = b 7:* rwm
+
+lxc.apparmor.profile = unconfined
+
+lxc.pty.max = 1024
+lxc.tty.max = 10
+lxc.environment=TERM=xterm
+
+lxc.hook.version = 1
+lxc.hook.autodev = mknod /dev/loop-control c 10 237
+lxc.hook.autodev = mknod /dev/loop0 c 7 0
+lxc.hook.autodev = mknod /dev/loop1 c 7 1
+lxc.hook.autodev = mknod /dev/loop2 c 7 2
+lxc.hook.autodev = mknod /dev/loop3 c 7 3
+lxc.hook.autodev = mknod /dev/loop4 c 7 4
+lxc.hook.autodev = mknod /dev/loop5 c 7 5
+lxc.hook.autodev = mknod /dev/loop6 c 7 6
+lxc.hook.autodev = mknod /dev/loop7 c 7 7
+lxc.hook.autodev = mknod /dev/console c 5 1
+EOF
+  lxc-execute -n "${ns}" --rcfile "${rcfile}" -- sh -c 'while true; do sleep 1; done' &
+  sleep 1
+
   mkdir -p /run/netns
   touch "/run/netns/${ns}"
 
-  unshare -n sh -c "mount --bind /proc/self/ns/net /run/netns/${ns}"
+  pid="$(lxc-info -n "${ns}" -p | cut -f 2 -d : | tr -d " ")"
+  mount --bind "/proc/${pid}/ns/net" "/run/netns/${ns}"
 
   veth1="v${ns}1"
   veth2="v${ns}2"
@@ -50,7 +118,6 @@ setup_clustering_netns() {
   brctl addif "${nsbridge}" "${veth1}"
 
   ip link set "${veth1}" up
-
   (
     cat <<EOF
     ip link set dev lo up
@@ -59,23 +126,25 @@ setup_clustering_netns() {
     ip addr add "10.1.1.10${id}/16" dev eth0
     ip route add default via 10.1.1.1
 EOF
-  ) | nsenter --net="/run/netns/${ns}" sh
+  ) | nsenter --all --target="${pid}" sh
 }
 
 teardown_clustering_netns() {
   prefix="lxd$$"
   nsbridge="br$$"
-  for ns in $(ip netns | grep "${prefix}" | cut -f 1 -d " ") ; do
+  for ns in $(lxc-ls | grep "${prefix}") ; do
       echo "==> Teardown clustering netns ${ns}"
+      pid="$(lxc-info -n "${ns}" -p | cut -f 2 -d : | tr -d " ")"
       veth1="v${ns}1"
       veth2="v${ns}2"
-      nsenter --net="/run/netns/${ns}" ip link set eth0 down
-      nsenter --net="/run/netns/${ns}" ip link set lo down
+      nsenter --all --target="${pid}" ip link set eth0 down
+      nsenter --all --target="${pid}" ip link set lo down
       ip link set "${veth1}" down
       brctl delif "${nsbridge}" "${veth1}"
       ip link delete "${veth1}" type veth
       umount "/run/netns/${ns}"
       rm "/run/netns/${ns}"
+      lxc-stop -n "${ns}"
   done
 }
 
diff --git a/test/includes/lxd.sh b/test/includes/lxd.sh
index bd3573fd4..ad88ea2aa 100644
--- a/test/includes/lxd.sh
+++ b/test/includes/lxd.sh
@@ -43,7 +43,8 @@ spawn_lxd() {
     if [ "${LXD_NETNS}" = "" ]; then
 	LD_LIBRARY_PATH="${sqlite}" LXD_DIR="${lxddir}" lxd --logfile "${lxddir}/lxd.log" "${DEBUG-}" "$@" 2>&1 &
     else
-	LD_LIBRARY_PATH="${sqlite}" LXD_DIR="${lxddir}" nsenter --net="/run/netns/${LXD_NETNS}" lxd --logfile "${lxddir}/lxd.log" "${DEBUG-}" "$@" 2>&1 &
+	pid="$(lxc-info -n "${LXD_NETNS}" -p | cut -f 2 -d : | tr -d " ")"
+	LD_LIBRARY_PATH="${sqlite}" LXD_DIR="${lxddir}" nsenter --all --target="${pid}" lxd --logfile "${lxddir}/lxd.log" "${DEBUG-}" "$@" 2>&1 &
     fi
     LXD_PID=$!
     echo "${LXD_PID}" > "${lxddir}/lxd.pid"
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 68f741450..92ad5e5fb 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -283,6 +283,13 @@ test_clustering_network() {
   ! LXD_DIR="${LXD_ONE_DIR}" lxc network create "${net}" --target node2
   LXD_DIR="${LXD_ONE_DIR}" lxc network show "${net}" | grep state: | grep -q PENDING
 
+  # The bridge.external_interfaces config key is not legal for the final network creation
+  ! LXD_DIR="${LXD_ONE_DIR}" lxc network create "${net}" bridge.external_interfaces=foo
+
+  # Create the network
+  LXD_DIR="${LXD_TWO_DIR}" lxc network create "${net}"
+  LXD_DIR="${LXD_ONE_DIR}" lxc network show "${net}" | grep state: | grep -q CREATED
+
   LXD_DIR="${LXD_TWO_DIR}" lxd shutdown
   LXD_DIR="${LXD_ONE_DIR}" lxd shutdown
   sleep 2

From a4239b172df35cd1753af792479076d105aadfbf Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Sat, 16 Dec 2017 07:17:05 +0000
Subject: [PATCH 131/227] Properly delete networks across the cluster

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
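Note: the convention here is that a nil n.state means "this request is
a cluster notification": the receiving node only tears down its local
bridge and skips all database work, while the initiating node also
notifies its peers and then removes the database entry. Sketch:

    if isClusterNotification(r) {
        n.state = nil // local teardown only, no db changes
    }
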
 lxd/networks.go           | 44 +++++++++++++++++++++++---------------------
 test/suites/clustering.sh |  4 ++++
 2 files changed, 27 insertions(+), 21 deletions(-)

diff --git a/lxd/networks.go b/lxd/networks.go
index 5ddd40656..7475018b0 100644
--- a/lxd/networks.go
+++ b/lxd/networks.go
@@ -106,7 +106,7 @@ func networksPost(d *Daemon, r *http.Request) Response {
 		// This is an internal request which triggers the actual
 		// creation of the network across all nodes, after they have
 		// been previously defined.
-		err = doNetworksCreate(d, req)
+		err = doNetworksCreate(d, req, true)
 		if err != nil {
 			return SmartError(err)
 		}
@@ -173,7 +173,7 @@ func networksPost(d *Daemon, r *http.Request) Response {
 		return SmartError(fmt.Errorf("Error inserting %s into database: %s", req.Name, err))
 	}
 
-	err = doNetworksCreate(d, req)
+	err = doNetworksCreate(d, req, true)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -225,7 +225,7 @@ func networksPostCluster(d *Daemon, req api.NetworksPost) error {
 	for key, value := range configs[nodeName] {
 		nodeReq.Config[key] = value
 	}
-	err = doNetworksCreate(d, nodeReq)
+	err = doNetworksCreate(d, nodeReq, false)
 	if err != nil {
 		return err
 	}
@@ -296,7 +296,9 @@ func networkFillConfig(req *api.NetworksPost) error {
 	return nil
 }
 
-func doNetworksCreate(d *Daemon, req api.NetworksPost) error {
+// Create the network on the system. The withDatabase flag is used to decide
+// whether to clean up the database if an error occurs.
+func doNetworksCreate(d *Daemon, req api.NetworksPost, withDatabase bool) error {
 	// Start the network
 	n, err := networkLoadByName(d.State(), req.Name)
 	if err != nil {
@@ -305,6 +307,9 @@ func doNetworksCreate(d *Daemon, req api.NetworksPost) error {
 
 	err = n.Start()
 	if err != nil {
+		if !withDatabase {
+			n.state = nil
+		}
 		n.Delete()
 		return err
 	}
@@ -413,22 +418,16 @@ func networkDelete(d *Daemon, r *http.Request) Response {
 	}
 
 	// If we're just handling a notification, we're done.
-	if n.state == nil {
-		return EmptySyncResponse
-	}
-
-	// Notify all other nodes. If any node is down, an error will be returned.
-	notifier, err := cluster.NewNotifier(d.State(), d.endpoints.NetworkCert(), cluster.NotifyAll)
-	if err != nil {
-		return SmartError(err)
-	}
-	err = notifier(func(client lxd.ContainerServer) error {
-		_, _, err := client.GetServer()
+	if n.state != nil {
+		// Notify all other nodes. If any node is down, an error will be returned.
+		// Declared separately: ":=" here would shadow the outer err and
+		// silently drop any notification failure checked below.
+		var notifier cluster.Notifier
+		notifier, err = cluster.NewNotifier(d.State(), d.endpoints.NetworkCert(), cluster.NotifyAll)
 		if err != nil {
-			return err
+			return SmartError(err)
 		}
-		return client.DeleteNetwork(name)
-	})
+		err = notifier(func(client lxd.ContainerServer) error {
+			return client.DeleteNetwork(name)
+		})
+	}
 	if err != nil {
 		return SmartError(err)
 	}
@@ -696,9 +695,12 @@ func (n *network) Delete() error {
 		if err != nil {
 			return err
 		}
-		if n.state == nil {
-			return nil
-		}
+	}
+
+	// If state is nil, this is a cluster notification, and we don't want
+	// to perform any database work.
+	if n.state == nil {
+		return nil
 	}
 
 	// Remove the network from the database
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 92ad5e5fb..3f40f24fe 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -290,6 +290,10 @@ test_clustering_network() {
   LXD_DIR="${LXD_TWO_DIR}" lxc network create "${net}"
   LXD_DIR="${LXD_ONE_DIR}" lxc network show "${net}" | grep state: | grep -q CREATED
 
+  # Delete the networks
+  LXD_DIR="${LXD_TWO_DIR}" lxc network delete "${net}"
+  LXD_DIR="${LXD_TWO_DIR}" lxc network delete "${bridge}"
+
   LXD_DIR="${LXD_TWO_DIR}" lxd shutdown
   LXD_DIR="${LXD_ONE_DIR}" lxd shutdown
   sleep 2

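An aside on the delete path above: the initiating node first notifies every
other cluster member, each of which tears down only its local instance (the
n.state == nil case), and the database record is removed once by the
initiator. A minimal sketch of that fan-out, reusing the calls from the hunk
(imports and the Daemon type as in lxd/networks.go; an illustration, not the
committed code):

func notifyNetworkDelete(d *Daemon, name string) error {
	// Ask every other cluster member to delete its local instance of
	// the network; as notification handlers they skip the database work.
	notifier, err := cluster.NewNotifier(d.State(), d.endpoints.NetworkCert(), cluster.NotifyAll)
	if err != nil {
		return err
	}
	return notifier(func(client lxd.ContainerServer) error {
		return client.DeleteNetwork(name)
	})
}
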
From e00a9a0cfd87aa8292b681b51d828c95877d276e Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 18 Dec 2017 10:50:36 +0000
Subject: [PATCH 132/227] Allow empty string when setting
 bridge.external_interfaces

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/main_init.go | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/lxd/main_init.go b/lxd/main_init.go
index 6f0a0cbb0..77a198e20 100644
--- a/lxd/main_init.go
+++ b/lxd/main_init.go
@@ -872,10 +872,11 @@ func (cmd *CmdInit) askClusteringNetworks(cluster *api.Cluster) ([]api.NetworksP
 		// The only config key to ask is 'bridge.external_interfaces',
 		// which is the only one node-specific.
 		key := "bridge.external_interfaces"
-		// Sort config keys to get a stable ordering (expecially for tests)
 		question := fmt.Sprintf(
 			`Enter local value for key "%s" of network "%s": `, key, post.Name)
-		post.Config[key] = cmd.Context.AskString(question, "", nil)
+		// Dummy validator for allowing empty strings.
+		validator := func(string) error { return nil }
+		post.Config[key] = cmd.Context.AskString(question, "", validator)
 		networks[i] = post
 	}
 	return networks, nil

From 022cedcd2420ceb47f4d18d85ba05421bf2e2def Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 18 Dec 2017 16:22:50 +0000
Subject: [PATCH 133/227] Allow entering an address without port when joining a
 cluster

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/main_init.go     | 3 ++-
 lxd/util/net_test.go | 1 +
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/lxd/main_init.go b/lxd/main_init.go
index 77a198e20..a39d03e2d 100644
--- a/lxd/main_init.go
+++ b/lxd/main_init.go
@@ -812,7 +812,8 @@ func (cmd *CmdInit) askClustering() (*cmdInitClusteringParams, error) {
 
 	// Target node address, password and certificate.
 join:
-	params.TargetAddress = cmd.Context.AskString("IP address or FQDN of an existing cluster node: ", "", nil)
+	targetAddress := cmd.Context.AskString("IP address or FQDN of an existing cluster node: ", "", nil)
+	params.TargetAddress = util.CanonicalNetworkAddress(targetAddress)
 	params.TargetPassword = cmd.Context.AskPassword(
 		"Trust password for the existing cluster: ", cmd.PasswordReader)
 
diff --git a/lxd/util/net_test.go b/lxd/util/net_test.go
index a56581464..cdca74bf5 100644
--- a/lxd/util/net_test.go
+++ b/lxd/util/net_test.go
@@ -29,6 +29,7 @@ func TestInMemoryNetwork(t *testing.T) {
 func TestCanonicalNetworkAddress(t *testing.T) {
 	cases := map[string]string{
 		"127.0.0.1":                             "127.0.0.1:8443",
+		"foo.bar":                               "foo.bar:8443",
 		"192.168.1.1:443":                       "192.168.1.1:443",
 		"f921:7358:4510:3fce:ac2e:844:2a35:54e": "[f921:7358:4510:3fce:ac2e:844:2a35:54e]:8443",
 	}

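The test table above pins the expected behavior down: bare IPs and FQDNs get
the default port 8443 appended, bare IPv6 literals additionally get
bracketed, and addresses that already carry a port pass through unchanged. A
standalone sketch of such a helper (an illustrative reimplementation, not
the actual util.CanonicalNetworkAddress source):

package main

import (
	"fmt"
	"net"
)

// canonicalNetworkAddress appends the default port 8443 unless the given
// address already has one. net.JoinHostPort also takes care of bracketing
// bare IPv6 literals, which make SplitHostPort fail just like a missing
// port does.
func canonicalNetworkAddress(address string) string {
	if _, _, err := net.SplitHostPort(address); err != nil {
		return net.JoinHostPort(address, "8443")
	}
	return address
}

func main() {
	fmt.Println(canonicalNetworkAddress("foo.bar"))         // foo.bar:8443
	fmt.Println(canonicalNetworkAddress("192.168.1.1:443")) // 192.168.1.1:443
}
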
From dde7ad55c2015d23336f7d8a0beaff6497f3f9d9 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 19 Dec 2017 08:12:26 +0000
Subject: [PATCH 134/227] Wait up to 10 mins for quorum when opening the
 clustering database

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/daemon.go               |  2 +-
 lxd/db/db.go                | 21 +++++++++++++++++++++
 lxd/db/query/transaction.go |  4 +++-
 3 files changed, 25 insertions(+), 2 deletions(-)

diff --git a/lxd/daemon.go b/lxd/daemon.go
index 0a41e0fd2..dd01560a1 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -610,7 +610,7 @@ func (d *Daemon) Stop() error {
 	trackError(d.tasks.Stop(time.Second)) // Give tasks at most a second to cleanup.
 
 	shouldUnmount := false
-	if d.db != nil {
+	if d.cluster != nil {
 		// It might be that database nodes are all down, in that case
 		// we don't want to wait too much.
 		//
diff --git a/lxd/db/db.go b/lxd/db/db.go
index 9abc84a7c..d5d8a840a 100644
--- a/lxd/db/db.go
+++ b/lxd/db/db.go
@@ -9,6 +9,7 @@ import (
 	"github.com/CanonicalLtd/go-grpc-sql"
 	"github.com/mattn/go-sqlite3"
 	"github.com/pkg/errors"
+	"golang.org/x/net/context"
 
 	"github.com/lxc/lxd/lxd/db/cluster"
 	"github.com/lxc/lxd/lxd/db/node"
@@ -163,6 +164,26 @@ func OpenCluster(name string, dialer grpcsql.Dialer, address string) (*Cluster,
 		return nil, errors.Wrap(err, "failed to open database")
 	}
 
+	// Test that the cluster database is operational. We wait up to 10
+	// minutes, in case there's no quorum of nodes online yet.
+	timeout := time.After(10 * time.Minute)
+	for {
+		err = db.Ping()
+		if err == nil {
+			break
+		}
+		cause := errors.Cause(err)
+		if cause != context.DeadlineExceeded {
+			return nil, err
+		}
+		time.Sleep(10 * time.Second)
+		select {
+		case <-timeout:
+			return nil, fmt.Errorf("failed to connect to cluster database")
+		default:
+		}
+	}
+
 	_, err = cluster.EnsureSchema(db, address)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to ensure schema")
diff --git a/lxd/db/query/transaction.go b/lxd/db/query/transaction.go
index 4d23d1f11..6d6ab7a37 100644
--- a/lxd/db/query/transaction.go
+++ b/lxd/db/query/transaction.go
@@ -3,13 +3,15 @@ package query
 import (
 	"database/sql"
 	"fmt"
+
+	"github.com/pkg/errors"
 )
 
 // Transaction executes the given function within a database transaction.
 func Transaction(db *sql.DB, f func(*sql.Tx) error) error {
 	tx, err := db.Begin()
 	if err != nil {
-		return fmt.Errorf("failed to begin transaction: %v", err)
+		return errors.Wrap(err, "failed to begin transaction")
 	}
 
 	err = f(tx)

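The quorum wait added to OpenCluster reduces to: ping the database until it
answers or a deadline passes. A compact standalone sketch of that pattern,
with the timeout and poll interval as parameters; the committed code also
bails out early when errors.Cause(err) is something other than
context.DeadlineExceeded, i.e. on non-transient failures:

package main

import (
	"database/sql"
	"fmt"
	"time"
)

// waitReady pings db until it responds, giving up after timeout. While a
// quorum of database nodes is still forming, pings keep failing and we
// simply try again after the poll interval.
func waitReady(db *sql.DB, timeout, interval time.Duration) error {
	deadline := time.After(timeout)
	for {
		if err := db.Ping(); err == nil {
			return nil
		}
		select {
		case <-deadline:
			return fmt.Errorf("failed to connect to cluster database")
		case <-time.After(interval):
			// Keep polling.
		}
	}
}

func main() {}
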
From 2155c4c293a20d7c30388e8f982152f47cff2f2c Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 19 Dec 2017 11:25:50 +0000
Subject: [PATCH 135/227] Fix duplicates in containers listing when fetching
 remote nodes

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/containers_get.go | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)

diff --git a/lxd/containers_get.go b/lxd/containers_get.go
index 425ef8590..49911409c 100644
--- a/lxd/containers_get.go
+++ b/lxd/containers_get.go
@@ -54,15 +54,15 @@ func doContainersGet(d *Daemon, r *http.Request) (interface{}, error) {
 	resultList := []*api.Container{}
 	resultMu := sync.Mutex{}
 
-	resultAppend := func(name string, c *api.Container, err error) {
+	resultAppend := func(name string, c api.Container, err error) {
 		if err != nil {
-			c = &api.Container{
+			c = api.Container{
 				Name:       name,
 				Status:     api.Error.String(),
 				StatusCode: api.Error}
 		}
 		resultMu.Lock()
-		resultList = append(resultList, c)
+		resultList = append(resultList, &c)
 		resultMu.Unlock()
 	}
 
@@ -71,10 +71,17 @@ func doContainersGet(d *Daemon, r *http.Request) (interface{}, error) {
 		// Mark containers on unavailable nodes as down
 		if recursion && address == "0.0.0.0" {
 			for _, container := range containers {
-				resultAppend(container, nil, fmt.Errorf("unavailable"))
+				resultAppend(container, api.Container{}, fmt.Errorf("unavailable"))
 			}
 		}
 
+		// If this is an internal request from another cluster node,
+		// ignore containers from other nodes, and return only the ones
+		// on this node
+		if isClusterNotification(r) && address != "" {
+			continue
+		}
+
 		// For recursion requests we need to fetch the state of remote
 		// containers from their respective nodes.
 		if recursion && address != "" && !isClusterNotification(r) {
@@ -83,7 +90,7 @@ func doContainersGet(d *Daemon, r *http.Request) (interface{}, error) {
 				cert := d.endpoints.NetworkCert()
 				cs, err := doContainersGetFromNode(address, cert)
 				for _, c := range cs {
-					resultAppend(c.Name, &c, err)
+					resultAppend(c.Name, c, err)
 				}
 				wg.Done()
 			}(address)
@@ -98,7 +105,7 @@ func doContainersGet(d *Daemon, r *http.Request) (interface{}, error) {
 			}
 
 			c, err := doContainerGet(d.State(), container)
-			resultAppend(container, c, err)
+			resultAppend(container, *c, err)
 		}
 	}
 	wg.Wait()

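The root cause of the duplicates is a classic Go pitfall: with the
loop-variable semantics Go had at the time, "for _, c := range cs" reuses a
single variable, so taking &c on every iteration yields many pointers to the
same value. Passing the struct by value, as the patch now does, gives each
append its own copy. A small self-contained demonstration:

package main

import "fmt"

type container struct{ Name string }

func main() {
	cs := []container{{"c1"}, {"c2"}, {"c3"}}

	// Buggy shape: &c points at the one loop variable every time, so
	// the slice ends up holding three pointers to "c3".
	bad := []*container{}
	for _, c := range cs {
		bad = append(bad, &c)
	}

	// Fixed shape (what the patch does via the by-value parameter):
	// copy first, then take the address of the per-iteration copy.
	good := []*container{}
	for _, c := range cs {
		c := c
		good = append(good, &c)
	}

	fmt.Println(bad[0].Name, bad[1].Name, bad[2].Name)    // c3 c3 c3
	fmt.Println(good[0].Name, good[1].Name, good[2].Name) // c1 c2 c3
}
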
From 06e4706019a8860d4e4c009450b64cf8e6b7787e Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 19 Dec 2017 12:48:32 +0000
Subject: [PATCH 136/227] Increase unit test timeout

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/.dir-locals.el | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lxd/.dir-locals.el b/lxd/.dir-locals.el
index 9342fb083..f7a84f6da 100644
--- a/lxd/.dir-locals.el
+++ b/lxd/.dir-locals.el
@@ -1,7 +1,7 @@
 ;;; Directory Local Variables
 ;;; For more information see (info "(emacs) Directory Variables")
 ((go-mode
-  . ((go-test-args . "-tags libsqlite3 -timeout 35s")
+  . ((go-test-args . "-tags libsqlite3 -timeout 60s")
      (eval
       . (set
 	 (make-local-variable 'flycheck-go-build-tags)

From b172818e9d97a44fd7cbf6aab317e83237a4d16e Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 3 Jan 2018 13:26:38 +0000
Subject: [PATCH 137/227] Add retry logic for transient errors in query.Retry

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/containers_get.go       |  3 ++-
 lxd/db/cluster/open.go      | 11 ++++++---
 lxd/db/db.go                | 56 ++++++++-------------------------------------
 lxd/db/query/retry.go       | 53 ++++++++++++++++++++++++++++++++++++++++++
 lxd/db/query/transaction.go | 10 +++++---
 5 files changed, 80 insertions(+), 53 deletions(-)
 create mode 100644 lxd/db/query/retry.go

diff --git a/lxd/containers_get.go b/lxd/containers_get.go
index 49911409c..ba9c4b763 100644
--- a/lxd/containers_get.go
+++ b/lxd/containers_get.go
@@ -9,6 +9,7 @@ import (
 
 	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/db/query"
 	"github.com/lxc/lxd/lxd/state"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
@@ -24,7 +25,7 @@ func containersGet(d *Daemon, r *http.Request) Response {
 		if err == nil {
 			return SyncResponse(true, result)
 		}
-		if !db.IsRetriableError(err) {
+		if !query.IsRetriableError(err) {
 			logger.Debugf("DBERR: containersGet: error %q", err)
 			return SmartError(err)
 		}
diff --git a/lxd/db/cluster/open.go b/lxd/db/cluster/open.go
index fbc678178..f12b024e0 100644
--- a/lxd/db/cluster/open.go
+++ b/lxd/db/cluster/open.go
@@ -6,6 +6,7 @@ import (
 	"sync/atomic"
 
 	"github.com/CanonicalLtd/go-grpc-sql"
+	"github.com/lxc/lxd/lxd/db/query"
 	"github.com/lxc/lxd/lxd/db/schema"
 	"github.com/lxc/lxd/shared/version"
 	"github.com/pkg/errors"
@@ -72,8 +73,7 @@ func EnsureSchema(db *sql.DB, address string) (bool, error) {
 		// Update the schema and api_extension columns of ourselves.
 		err = updateNodeVersion(tx, address, apiExtensions)
 		if err != nil {
-			return errors.Wrap(err, "failed to update node version")
-
+			return err
 		}
 
 		err = checkClusterIsUpgradable(tx, [2]int{len(updates), apiExtensions})
@@ -87,7 +87,12 @@ func EnsureSchema(db *sql.DB, address string) (bool, error) {
 	schema := Schema()
 	schema.Check(check)
 
-	initial, err := schema.Ensure(db)
+	var initial int
+	err := query.Retry(func() error {
+		var err error
+		initial, err = schema.Ensure(db)
+		return err
+	})
 	if someNodesAreBehind {
 		return false, nil
 	}
diff --git a/lxd/db/db.go b/lxd/db/db.go
index d5d8a840a..214b0a603 100644
--- a/lxd/db/db.go
+++ b/lxd/db/db.go
@@ -3,11 +3,9 @@ package db
 import (
 	"database/sql"
 	"fmt"
-	"strings"
 	"time"
 
 	"github.com/CanonicalLtd/go-grpc-sql"
-	"github.com/mattn/go-sqlite3"
 	"github.com/pkg/errors"
 	"golang.org/x/net/context"
 
@@ -236,21 +234,12 @@ func (c *Cluster) Transaction(f func(*ClusterTx) error) error {
 		nodeID: c.nodeID,
 	}
 
-	// FIXME: the retry loop should be configurable.
-	var err error
-	for i := 0; i < 20; i++ {
-		err = query.Transaction(c.db, func(tx *sql.Tx) error {
+	return query.Retry(func() error {
+		return query.Transaction(c.db, func(tx *sql.Tx) error {
 			clusterTx.tx = tx
 			return f(clusterTx)
 		})
-		if err != nil && IsRetriableError(err) {
-			logger.Debugf("Retry failed transaction")
-			time.Sleep(250 * time.Millisecond)
-			continue
-		}
-		break
-	}
-	return err
+	})
 }
 
 // NodeID sets the node NodeID associated with this cluster instance. It's used for
@@ -296,31 +285,6 @@ func UpdateSchemasDotGo() error {
 	return nil
 }
 
-// IsRetriableError returns true if the given error might be transient and the
-// interaction can be safely retried.
-func IsRetriableError(err error) bool {
-	if err == nil {
-		return false
-	}
-	if err == sqlite3.ErrLocked || err == sqlite3.ErrBusy {
-		return true
-	}
-	if err.Error() == "database is locked" {
-		return true
-	}
-
-	// FIXME: we should bubble errors using errors.Wrap()
-	// instead, and check for err.Cause() == sql.ErrBadConnection.
-	if strings.Contains(err.Error(), "bad connection") {
-		return true
-	}
-	if strings.Contains(err.Error(), "leadership lost") {
-		return true
-	}
-
-	return false
-}
-
 func isNoMatchError(err error) bool {
 	if err == nil {
 		return false
@@ -337,7 +301,7 @@ func begin(db *sql.DB) (*sql.Tx, error) {
 		if err == nil {
 			return tx, nil
 		}
-		if !IsRetriableError(err) {
+		if !query.IsRetriableError(err) {
 			logger.Debugf("DbBegin: error %q", err)
 			return nil, err
 		}
@@ -352,10 +316,10 @@ func begin(db *sql.DB) (*sql.Tx, error) {
 func TxCommit(tx *sql.Tx) error {
 	for i := 0; i < 1000; i++ {
 		err := tx.Commit()
-		if err == nil {
+		if err == nil || err == sql.ErrTxDone { // Ignore duplicate commits/rollbacks
 			return nil
 		}
-		if !IsRetriableError(err) {
+		if !query.IsRetriableError(err) {
 			logger.Debugf("Txcommit: error %q", err)
 			return err
 		}
@@ -376,7 +340,7 @@ func dbQueryRowScan(db *sql.DB, q string, args []interface{}, outargs []interfac
 		if isNoMatchError(err) {
 			return err
 		}
-		if !IsRetriableError(err) {
+		if !query.IsRetriableError(err) {
 			return err
 		}
 		time.Sleep(30 * time.Millisecond)
@@ -393,7 +357,7 @@ func dbQuery(db *sql.DB, q string, args ...interface{}) (*sql.Rows, error) {
 		if err == nil {
 			return result, nil
 		}
-		if !IsRetriableError(err) {
+		if !query.IsRetriableError(err) {
 			logger.Debugf("DbQuery: query %q error %q", q, err)
 			return nil, err
 		}
@@ -478,7 +442,7 @@ func queryScan(qi queryer, q string, inargs []interface{}, outfmt []interface{})
 		if err == nil {
 			return result, nil
 		}
-		if !IsRetriableError(err) {
+		if !query.IsRetriableError(err) {
 			logger.Debugf("DbQuery: query %q error %q", q, err)
 			return nil, err
 		}
@@ -496,7 +460,7 @@ func exec(db *sql.DB, q string, args ...interface{}) (sql.Result, error) {
 		if err == nil {
 			return result, nil
 		}
-		if !IsRetriableError(err) {
+		if !query.IsRetriableError(err) {
 			logger.Debugf("DbExec: query %q error %q", q, err)
 			return nil, err
 		}
diff --git a/lxd/db/query/retry.go b/lxd/db/query/retry.go
new file mode 100644
index 000000000..6209ab541
--- /dev/null
+++ b/lxd/db/query/retry.go
@@ -0,0 +1,53 @@
+package query
+
+import (
+	"strings"
+	"time"
+
+	"github.com/lxc/lxd/shared/logger"
+	sqlite3 "github.com/mattn/go-sqlite3"
+)
+
+// Retry wraps a function that interacts with the database, and retries it in
+// case a transient error is hit.
+//
+// This should typically be used to wrap transactions.
+func Retry(f func() error) error {
+	// TODO: the retry loop should be configurable.
+	var err error
+	for i := 0; i < 20; i++ {
+		err = f()
+		if err != nil && IsRetriableError(err) {
+			logger.Debugf("Retry failed db interaction (%v)", err)
+			time.Sleep(250 * time.Millisecond)
+			continue
+		}
+		break
+	}
+	return err
+}
+
+// IsRetriableError returns true if the given error might be transient and the
+// interaction can be safely retried.
+func IsRetriableError(err error) bool {
+	if err == nil {
+		return false
+	}
+	if err == sqlite3.ErrLocked || err == sqlite3.ErrBusy {
+		return true
+	}
+	if err.Error() == "database is locked" {
+		return true
+	}
+
+	// FIXME: we should bubble errors using errors.Wrap()
+	// instead, and check for err.Cause() == sql.ErrBadConnection.
+	if strings.Contains(err.Error(), "bad connection") {
+		return true
+	}
+	if strings.Contains(err.Error(), "leadership lost") {
+		return true
+	}
+
+	return false
+}
diff --git a/lxd/db/query/transaction.go b/lxd/db/query/transaction.go
index 6d6ab7a37..94aa9badc 100644
--- a/lxd/db/query/transaction.go
+++ b/lxd/db/query/transaction.go
@@ -2,8 +2,8 @@ package query
 
 import (
 	"database/sql"
-	"fmt"
 
+	"github.com/lxc/lxd/shared/logger"
 	"github.com/pkg/errors"
 )
 
@@ -19,7 +19,11 @@ func Transaction(db *sql.DB, f func(*sql.Tx) error) error {
 		return rollback(tx, err)
 	}
 
-	return tx.Commit()
+	err = tx.Commit()
+	if err == sql.ErrTxDone {
+		err = nil // Ignore duplicate commits/rollbacks
+	}
+	return err
 }
 
 // Rollback a transaction after the given error occurred. If the rollback
@@ -28,7 +32,7 @@ func Transaction(db *sql.DB, f func(*sql.Tx) error) error {
 func rollback(tx *sql.Tx, reason error) error {
 	err := tx.Rollback()
 	if err != nil {
-		return fmt.Errorf("failed to rollback transaction after error (%v)", reason)
+		logger.Warnf("failed to rollback transaction after error (%v): %v", reason, err)
 	}
 
 	return reason

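With Retry extracted into the query package, a call site can wrap an entire
transaction so that transient failures ("database is locked", a lost dqlite
leadership, a bad connection) re-run the whole unit of work. A hypothetical
call site for illustration; touchNode and its SQL are stand-ins, not code
from this series:

package main

import (
	"database/sql"

	"github.com/lxc/lxd/lxd/db/query"
)

// touchNode re-runs the whole transaction if it fails with an error that
// query.IsRetriableError considers transient.
func touchNode(db *sql.DB, nodeID int64) error {
	return query.Retry(func() error {
		return query.Transaction(db, func(tx *sql.Tx) error {
			_, err := tx.Exec("UPDATE nodes SET heartbeat=strftime('%s') WHERE id=?", nodeID)
			return err
		})
	})
}

func main() {}
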
From 72f1bced6e9090e115822dd6f5c71981914de62f Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 3 Jan 2018 16:20:30 +0000
Subject: [PATCH 138/227] Provide the node name in lxc list even when nodes
 are offline

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/containers_get.go     | 34 +++++++++++++++++++++++++++-------
 lxd/db/containers.go      | 36 +++++++++++++++++++++++++++++++++++-
 lxd/db/containers_test.go | 23 +++++++++++++++++++++++
 3 files changed, 85 insertions(+), 8 deletions(-)

diff --git a/lxd/containers_get.go b/lxd/containers_get.go
index ba9c4b763..7904b53fb 100644
--- a/lxd/containers_get.go
+++ b/lxd/containers_get.go
@@ -40,11 +40,22 @@ func containersGet(d *Daemon, r *http.Request) Response {
 }
 
 func doContainersGet(d *Daemon, r *http.Request) (interface{}, error) {
-	var result map[string][]string
+	var result map[string][]string // Containers by node address
+	var nodes map[string]string    // Node names by container
 	err := d.cluster.Transaction(func(tx *db.ClusterTx) error {
 		var err error
+
 		result, err = tx.ContainersListByNodeAddress()
-		return err
+		if err != nil {
+			return err
+		}
+
+		nodes, err = tx.ContainersByNodeName()
+		if err != nil {
+			return err
+		}
+
+		return nil
 	})
 	if err != nil {
 		return []string{}, err
@@ -60,7 +71,9 @@ func doContainersGet(d *Daemon, r *http.Request) (interface{}, error) {
 			c = api.Container{
 				Name:       name,
 				Status:     api.Error.String(),
-				StatusCode: api.Error}
+				StatusCode: api.Error,
+				Node:       nodes[name],
+			}
 		}
 		resultMu.Lock()
 		resultList = append(resultList, &c)
@@ -74,6 +87,7 @@ func doContainersGet(d *Daemon, r *http.Request) (interface{}, error) {
 			for _, container := range containers {
 				resultAppend(container, api.Container{}, fmt.Errorf("unavailable"))
 			}
+			continue
 		}
 
 		// If this is an internal request from another cluster node,
@@ -87,14 +101,20 @@ func doContainersGet(d *Daemon, r *http.Request) (interface{}, error) {
 		// containers from their respective nodes.
 		if recursion && address != "" && !isClusterNotification(r) {
 			wg.Add(1)
-			go func(address string) {
+			go func(address string, containers []string) {
+				defer wg.Done()
 				cert := d.endpoints.NetworkCert()
 				cs, err := doContainersGetFromNode(address, cert)
+				if err != nil {
+					for _, name := range containers {
+						resultAppend(name, api.Container{}, err)
+					}
+					return
+				}
 				for _, c := range cs {
-					resultAppend(c.Name, c, err)
+					resultAppend(c.Name, c, nil)
 				}
-				wg.Done()
-			}(address)
+			}(address, containers)
 			continue
 		}
 
diff --git a/lxd/db/containers.go b/lxd/db/containers.go
index a2ddcbc58..c97667376 100644
--- a/lxd/db/containers.go
+++ b/lxd/db/containers.go
@@ -107,7 +107,8 @@ SELECT containers.name, nodes.id, nodes.address, nodes.heartbeat
 	result := map[string][]string{}
 
 	for i := 0; rows.Next(); i++ {
-		var name, nodeAddress string
+		var name string
+		var nodeAddress string
 		var nodeID int64
 		var nodeHeartbeat time.Time
 		err := rows.Scan(&name, &nodeID, &nodeAddress, &nodeHeartbeat)
@@ -129,6 +130,39 @@ SELECT containers.name, nodes.id, nodes.address, nodes.heartbeat
 	return result, nil
 }
 
+// ContainersByNodeName returns a map associating each container to the name of
+// its node.
+func (c *ClusterTx) ContainersByNodeName() (map[string]string, error) {
+	stmt := `
+SELECT containers.name, nodes.name
+  FROM containers JOIN nodes ON nodes.id = containers.node_id
+  WHERE containers.type=?
+`
+	rows, err := c.tx.Query(stmt, CTypeRegular)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	result := map[string]string{}
+
+	for i := 0; rows.Next(); i++ {
+		var name string
+		var nodeName string
+		err := rows.Scan(&name, &nodeName)
+		if err != nil {
+			return nil, err
+		}
+		result[name] = nodeName
+	}
+
+	err = rows.Err()
+	if err != nil {
+		return nil, err
+	}
+	return result, nil
+}
+
 func (c *Cluster) ContainerRemove(name string) error {
 	id, err := c.ContainerId(name)
 	if err != nil {
diff --git a/lxd/db/containers_test.go b/lxd/db/containers_test.go
index a5a68c444..9f8e035d6 100644
--- a/lxd/db/containers_test.go
+++ b/lxd/db/containers_test.go
@@ -39,6 +39,29 @@ func TestContainersListByNodeAddress(t *testing.T) {
 		}, result)
 }
 
+// Containers are associated with their node name.
+func TestContainersByNodeName(t *testing.T) {
+	tx, cleanup := db.NewTestClusterTx(t)
+	defer cleanup()
+
+	nodeID1 := int64(1) // This is the default local node
+
+	nodeID2, err := tx.NodeAdd("node2", "1.2.3.4:666")
+	require.NoError(t, err)
+
+	addContainer(t, tx, nodeID2, "c1")
+	addContainer(t, tx, nodeID1, "c2")
+
+	result, err := tx.ContainersByNodeName()
+	require.NoError(t, err)
+	assert.Equal(
+		t,
+		map[string]string{
+			"c1": "node2",
+			"c2": "none",
+		}, result)
+}
+
 func addContainer(t *testing.T, tx *db.ClusterTx, nodeID int64, name string) {
 	stmt := `
 INSERT INTO containers(node_id, name, architecture, type) VALUES (?, ?, 1, ?)

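For readers following along, the shape of doContainersGet after this change
is a standard Go fan-out: one goroutine per remote address, a WaitGroup for
completion, a mutex guarding the shared result slice, and a deferred Done so
the error path can't leak a waiter. A stripped-down sketch, where fetch
stands in for doContainersGetFromNode:

package main

import (
	"fmt"
	"sync"
)

// fetch is a stand-in for doContainersGetFromNode: list one node's
// containers over the network.
func fetch(address string) ([]string, error) {
	return []string{"c-" + address}, nil
}

func main() {
	addresses := []string{"10.1.1.101:8443", "10.1.1.102:8443"}

	var (
		wg      sync.WaitGroup
		mu      sync.Mutex
		results []string
	)
	for _, address := range addresses {
		wg.Add(1)
		go func(address string) {
			defer wg.Done() // runs on every path, as in the patch
			names, err := fetch(address)
			if err != nil {
				// The patch appends per-container error
				// placeholders here instead of dropping them.
				return
			}
			mu.Lock()
			results = append(results, names...)
			mu.Unlock()
		}(address)
	}
	wg.Wait()
	fmt.Println(results)
}
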
From 7a10fca79a757a9a5c115bdfab172a39038d5c2c Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 4 Jan 2018 09:12:25 +0000
Subject: [PATCH 139/227] Add support for lxc launch --target <node>

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxc/launch.go             | 2 +-
 test/suites/clustering.sh | 3 +--
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/lxc/launch.go b/lxc/launch.go
index ac81f2ded..d31526844 100644
--- a/lxc/launch.go
+++ b/lxc/launch.go
@@ -18,7 +18,7 @@ func (c *launchCmd) showByDefault() bool {
 
 func (c *launchCmd) usage() string {
 	return i18n.G(
-		`Usage: lxc launch [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n <network>] [--storage|-s <pool>] [--type|-t <instance type>]
+		`Usage: lxc launch [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n <network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target <node>]
 
 Create and start containers from images.
 
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 3f40f24fe..f61d6294d 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -151,8 +151,7 @@ test_clustering_containers() {
 
   # Create a container on node1 using the image that was stored on
   # node2.
-  LXD_DIR="${LXD_TWO_DIR}" lxc init --target node1 testimage bar
-  LXD_DIR="${LXD_ONE_DIR}" lxc start bar
+  LXD_DIR="${LXD_TWO_DIR}" lxc launch --target node1 testimage bar
   LXD_DIR="${LXD_TWO_DIR}" lxc stop bar
   LXD_DIR="${LXD_ONE_DIR}" lxc delete bar
   ! LXD_DIR="${LXD_TWO_DIR}" lxc list | grep -q bar

From 10cab14e4bf1ff3b9c54dde753d16742712982fd Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 8 Jan 2018 13:16:26 +0000
Subject: [PATCH 140/227] Prevent cluster database activity during bootstrap or
 join

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/membership.go | 30 +++++++++++++++++++-----------
 lxd/db/db.go              | 24 ++++++++++++++++++++++++
 2 files changed, 43 insertions(+), 11 deletions(-)

diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index e55ba85ba..f0a6cdbd0 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -85,7 +85,10 @@ func Bootstrap(state *state.State, gateway *Gateway, name string) error {
 
 	// Shutdown the gateway. This will trash any gRPC SQL connection
 	// against our in-memory dqlite driver and shutdown the associated raft
-	// instance.
+	// instance. We also lock regular access to the cluster database since
+	// we don't want any other database code to run while we're
+	// reconfiguring raft.
+	state.Cluster.EnterExclusive()
 	err = gateway.Shutdown()
 	if err != nil {
 		return errors.Wrap(err, "failed to shutdown gRPC SQL gateway")
@@ -116,10 +119,11 @@ func Bootstrap(state *state.State, gateway *Gateway, name string) error {
 	}
 
 	// Make sure we can actually connect to the cluster database through
-	// the network endpoint. This also makes the Go SQL pooling system
-	// invalidate the old connection, so new queries will be executed over
-	// the new gRPC network connection.
-	err = state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+	// the network endpoint. This also releases the previously acquired
+	// lock and makes the Go SQL pooling system invalidate the old
+	// connection, so new queries will be executed over the new gRPC
+	// network connection.
+	err = state.Cluster.ExitExclusive(func(tx *db.ClusterTx) error {
 		_, err := tx.Nodes()
 		return err
 	})
@@ -268,7 +272,10 @@ func Join(state *state.State, gateway *Gateway, cert *shared.CertInfo, name stri
 
 	// Re-initialize the gateway. This will create a new raft factory and
 	// dqlite driver instance, which will be exposed over gRPC by the
-	// gateway handlers.
+	// gateway handlers. We also lock regular access to the cluster database since
+	// we don't want any other database code to run while we're
+	// reconfiguring raft.
+	state.Cluster.EnterExclusive()
 	gateway.cert = cert
 	err = gateway.init()
 	if err != nil {
@@ -297,11 +304,12 @@ func Join(state *state.State, gateway *Gateway, cert *shared.CertInfo, name stri
 	}
 
 	// Make sure we can actually connect to the cluster database through
-	// the network endpoint. This also makes the Go SQL pooling system
-	// invalidate the old connection, so new queries will be executed over
-	// the new gRPC network connection. Also, update the storage_pools and
-	// networks tables with our local configuration.
-	err = state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+	// the network endpoint. This also releases the previously acquired
+	// lock and makes the Go SQL pooling system invalidate the old
+	// connection, so new queries will be executed over the new gRPC
+	// network connection. Also, update the storage_pools and networks
+	// tables with our local configuration.
+	err = state.Cluster.ExitExclusive(func(tx *db.ClusterTx) error {
 		node, err := tx.NodeByAddress(address)
 		if err != nil {
 			return errors.Wrap(err, "failed to get ID of joining node")
diff --git a/lxd/db/db.go b/lxd/db/db.go
index 214b0a603..07e7fed3c 100644
--- a/lxd/db/db.go
+++ b/lxd/db/db.go
@@ -3,6 +3,7 @@ package db
 import (
 	"database/sql"
 	"fmt"
+	"sync"
 	"time"
 
 	"github.com/CanonicalLtd/go-grpc-sql"
@@ -142,6 +143,7 @@ func (n *Node) Begin() (*sql.Tx, error) {
 type Cluster struct {
 	db     *sql.DB // Handle to the cluster dqlite database, gated behind gRPC SQL.
 	nodeID int64   // Node ID of this LXD instance.
+	mu     sync.RWMutex
 }
 
 // OpenCluster creates a new Cluster object for interacting with the dqlite
@@ -229,7 +231,29 @@ func ForLocalInspection(db *sql.DB) *Cluster {
 // cluster database interactions invoked by the given function. If the function
 // returns no error, all database changes are committed to the cluster database
 // database, otherwise they are rolled back.
+//
+// If EnterExclusive has been called before, calling Transaction will block
+// until ExitExclusive has been called as well to release the lock.
 func (c *Cluster) Transaction(f func(*ClusterTx) error) error {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	return c.transaction(f)
+}
+
+// EnterExclusive acquires a lock on the cluster db, so any successive call to
+// Transaction will block until ExitExclusive has been called.
+func (c *Cluster) EnterExclusive() {
+	c.mu.Lock()
+}
+
+// ExitExclusive runs the given transaction and then releases the lock acquired
+// with EnterExclusive.
+func (c *Cluster) ExitExclusive(f func(*ClusterTx) error) error {
+	defer c.mu.Unlock()
+	return c.transaction(f)
+}
+
+func (c *Cluster) transaction(f func(*ClusterTx) error) error {
 	clusterTx := &ClusterTx{
 		nodeID: c.nodeID,
 	}

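The locking introduced here is a sync.RWMutex worn three ways: Transaction
takes the read lock, EnterExclusive takes the write lock, and ExitExclusive
runs one final callback (the connectivity check against the new gRPC
endpoint) before releasing it. A minimal skeleton of the same idea, without
the LXD specifics:

package main

import "sync"

type cluster struct {
	mu sync.RWMutex
}

// Transaction blocks while the exclusive lock is held; concurrent
// transactions otherwise proceed freely under the read lock.
func (c *cluster) Transaction(f func() error) error {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return f()
}

// EnterExclusive stops all new transactions, e.g. while raft is being
// reconfigured during bootstrap or join.
func (c *cluster) EnterExclusive() {
	c.mu.Lock()
}

// ExitExclusive runs f and then lets regular transactions resume.
func (c *cluster) ExitExclusive(f func() error) error {
	defer c.mu.Unlock()
	return f()
}

func main() {}
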
From 6226bae40ceb41efe570af04faba50a3f61a8278 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 9 Jan 2018 08:38:45 +0000
Subject: [PATCH 141/227] Migrate profiles_* tables to the cluster database

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/migration.go            |  6 ++++++
 lxd/db/migration_test.go       | 37 +++++++++++++++++++++++++++++++++++++
 lxd/db/node/schema.go          | 24 ------------------------
 lxd/db/node/update.go          |  3 +++
 test/suites/database_update.sh |  7 +------
 5 files changed, 47 insertions(+), 30 deletions(-)

diff --git a/lxd/db/migration.go b/lxd/db/migration.go
index fe02ae1dd..1046617aa 100644
--- a/lxd/db/migration.go
+++ b/lxd/db/migration.go
@@ -7,6 +7,7 @@ import (
 
 	"github.com/lxc/lxd/lxd/db/query"
 	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/logger"
 	"github.com/pkg/errors"
 )
 
@@ -24,6 +25,7 @@ func LoadPreClusteringData(tx *sql.Tx) (*Dump, error) {
 		Data:   map[string][][]interface{}{},
 	}
 	for _, table := range preClusteringTables {
+		logger.Debugf("Loading data from table %s", table)
 		data := [][]interface{}{}
 		stmt := fmt.Sprintf("SELECT * FROM %s", table)
 		rows, err := tx.Query(stmt)
@@ -74,6 +76,7 @@ func (c *Cluster) ImportPreClusteringData(dump *Dump) error {
 	}
 
 	for _, table := range preClusteringTables {
+		logger.Debugf("Migrating data for table %s", table)
 		for i, row := range dump.Data[table] {
 			for i, element := range row {
 				// Convert []byte columns to string. This is safe to do since
@@ -191,6 +194,9 @@ var preClusteringTables = []string{
 	"certificates",
 	"config",
 	"profiles",
+	"profiles_config",
+	"profiles_devices",
+	"profiles_devices_config",
 	"containers",
 	"containers_config",
 	"containers_devices",
diff --git a/lxd/db/migration_test.go b/lxd/db/migration_test.go
index 28281e08f..06c796b79 100644
--- a/lxd/db/migration_test.go
+++ b/lxd/db/migration_test.go
@@ -87,6 +87,33 @@ func TestImportPreClusteringData(t *testing.T) {
 	require.NoError(t, err)
 	assert.Len(t, volumes, 1)
 	assert.Equal(t, "/foo/bar", volumes[0].Config["source"])
+
+	// profiles
+	profiles, err := cluster.Profiles()
+	require.NoError(t, err)
+	assert.Equal(t, []string{"default", "users"}, profiles)
+	_, profile, err := cluster.ProfileGet("default")
+	require.NoError(t, err)
+	assert.Equal(t, map[string]string{}, profile.Config)
+	assert.Equal(t,
+		map[string]map[string]string{
+			"root": {
+				"path": "/",
+				"pool": "default",
+				"type": "nic"},
+			"eth0": {
+				"type":    "nic",
+				"nictype": "bridged",
+				"parent":  "lxdbr0"}},
+		profile.Devices)
+	_, profile, err = cluster.ProfileGet("users")
+	require.NoError(t, err)
+	assert.Equal(t,
+		map[string]string{
+			"boot.autostart": "false",
+			"limits.cpu":     "50%"},
+		profile.Config)
+	assert.Equal(t, map[string]map[string]string{}, profile.Devices)
 }
 
 // Return a sql.Tx against a memory database populated with pre-clustering
@@ -102,6 +129,16 @@ func newPreClusteringTx(t *testing.T) *sql.Tx {
 		preClusteringNodeSchema,
 		"INSERT INTO certificates VALUES (1, 'abcd:efgh', 1, 'foo', 'FOO')",
 		"INSERT INTO config VALUES(1, 'core.https_address', '1.2.3.4:666')",
+		"INSERT INTO profiles VALUES(1, 'default', 'Default LXD profile')",
+		"INSERT INTO profiles VALUES(2, 'users', '')",
+		"INSERT INTO profiles_config VALUES(2, 2, 'boot.autostart', 'false')",
+		"INSERT INTO profiles_config VALUES(3, 2, 'limits.cpu', '50%')",
+		"INSERT INTO profiles_devices VALUES(1, 1, 'eth0', 1)",
+		"INSERT INTO profiles_devices VALUES(2, 1, 'root', 1)",
+		"INSERT INTO profiles_devices_config VALUES(1, 1, 'nictype', 'bridged')",
+		"INSERT INTO profiles_devices_config VALUES(2, 1, 'parent', 'lxdbr0')",
+		"INSERT INTO profiles_devices_config VALUES(3, 2, 'path', '/')",
+		"INSERT INTO profiles_devices_config VALUES(4, 2, 'pool', 'default')",
 		"INSERT INTO images VALUES(1, 'abc', 'x.gz', 16, 0, 1, 0, 0, strftime('%d-%m-%Y', 'now'), 0, 0, 0)",
 		"INSERT INTO networks VALUES(1, 'lxcbr0', 'LXD bridge')",
 		"INSERT INTO networks_config VALUES(1, 1, 'ipv4.nat', 'true')",
diff --git a/lxd/db/node/schema.go b/lxd/db/node/schema.go
index fcd18b658..ec4101884 100644
--- a/lxd/db/node/schema.go
+++ b/lxd/db/node/schema.go
@@ -18,30 +18,6 @@ CREATE TABLE patches (
     applied_at DATETIME NOT NULL,
     UNIQUE (name)
 );
-CREATE TABLE profiles_config (
-    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    profile_id INTEGER NOT NULL,
-    key VARCHAR(255) NOT NULL,
-    value VARCHAR(255),
-    UNIQUE (profile_id, key),
-    FOREIGN KEY (profile_id) REFERENCES profiles(id) ON DELETE CASCADE
-);
-CREATE TABLE profiles_devices (
-    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    profile_id INTEGER NOT NULL,
-    name VARCHAR(255) NOT NULL,
-    type INTEGER NOT NULL default 0,
-    UNIQUE (profile_id, name),
-    FOREIGN KEY (profile_id) REFERENCES profiles (id) ON DELETE CASCADE
-);
-CREATE TABLE profiles_devices_config (
-    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    profile_device_id INTEGER NOT NULL,
-    key VARCHAR(255) NOT NULL,
-    value TEXT,
-    UNIQUE (profile_device_id, key),
-    FOREIGN KEY (profile_device_id) REFERENCES profiles_devices (id) ON DELETE CASCADE
-);
 CREATE TABLE raft_nodes (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
     address TEXT NOT NULL,
diff --git a/lxd/db/node/update.go b/lxd/db/node/update.go
index d4ce9efea..8ab1487bd 100644
--- a/lxd/db/node/update.go
+++ b/lxd/db/node/update.go
@@ -130,6 +130,9 @@ DROP TABLE images_source;
 DROP TABLE images;
 DROP TABLE networks_config;
 DROP TABLE networks;
+DROP TABLE profiles_devices_config;
+DROP TABLE profiles_devices;
+DROP TABLE profiles_config;
 DROP TABLE profiles;
 DROP TABLE storage_volumes_config;
 DROP TABLE storage_volumes;
diff --git a/test/suites/database_update.sh b/test/suites/database_update.sh
index 872308f20..4987802a0 100644
--- a/test/suites/database_update.sh
+++ b/test/suites/database_update.sh
@@ -9,14 +9,9 @@ test_database_update(){
   spawn_lxd "${LXD_MIGRATE_DIR}" true
 
   # Assert there are enough tables.
-  expected_tables=7
+  expected_tables=4
   tables=$(sqlite3 "${MIGRATE_DB}" ".dump" | grep -c "CREATE TABLE")
   [ "${tables}" -eq "${expected_tables}" ] || { echo "FAIL: Wrong number of tables after database migration. Found: ${tables}, expected ${expected_tables}"; false; }
 
-  # There should be 12 "ON DELETE CASCADE" occurrences
-  expected_cascades=3
-  cascades=$(sqlite3 "${MIGRATE_DB}" ".dump" | grep -c "ON DELETE CASCADE")
-  [ "${cascades}" -eq "${expected_cascades}" ] || { echo "FAIL: Wrong number of ON DELETE CASCADE foreign keys. Found: ${cascades}, exected: ${expected_cascades}"; false; }
-
   kill_lxd "$LXD_MIGRATE_DIR"
 }

From 51feb86c4a4157f1295a703341e82c93adae65e2 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 9 Jan 2018 10:02:23 +0000
Subject: [PATCH 142/227] Don't insert core.https_address into config when
 migrating

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/migration.go      | 13 +++++++++++++
 lxd/db/migration_test.go |  7 +++++--
 2 files changed, 18 insertions(+), 2 deletions(-)

diff --git a/lxd/db/migration.go b/lxd/db/migration.go
index 1046617aa..e09131706 100644
--- a/lxd/db/migration.go
+++ b/lxd/db/migration.go
@@ -99,6 +99,19 @@ func (c *Cluster) ImportPreClusteringData(dump *Dump) error {
 			}
 
 			switch table {
+			case "config":
+				// Don't migrate the core.https_address config key,
+				// which is node-specific and must remain in the node
+				// database.
+				isCoreHTTPSAddress := false
+				for i, column := range columns {
+					if column == "key" && row[i] == "core.https_address" {
+						isCoreHTTPSAddress = true
+					}
+				}
+				if isCoreHTTPSAddress {
+					continue
+				}
 			case "containers":
 				fallthrough
 			case "networks_config":
diff --git a/lxd/db/migration_test.go b/lxd/db/migration_test.go
index 06c796b79..c97e17cd3 100644
--- a/lxd/db/migration_test.go
+++ b/lxd/db/migration_test.go
@@ -17,9 +17,11 @@ func TestLoadPreClusteringData(t *testing.T) {
 
 	// config
 	assert.Equal(t, []string{"id", "key", "value"}, dump.Schema["config"])
-	assert.Len(t, dump.Data["config"], 1)
+	assert.Len(t, dump.Data["config"], 2)
 	rows := []interface{}{int64(1), []byte("core.https_address"), []byte("1.2.3.4:666")}
 	assert.Equal(t, rows, dump.Data["config"][0])
+	rows = []interface{}{int64(2), []byte("core.trust_password"), []byte("sekret")}
+	assert.Equal(t, rows, dump.Data["config"][1])
 
 	// networks
 	assert.Equal(t, []string{"id", "name", "description"}, dump.Schema["networks"])
@@ -55,7 +57,7 @@ func TestImportPreClusteringData(t *testing.T) {
 	err = cluster.Transaction(func(tx *db.ClusterTx) error {
 		config, err := tx.Config()
 		require.NoError(t, err)
-		values := map[string]string{"core.https_address": "1.2.3.4:666"}
+		values := map[string]string{"core.trust_password": "sekret"}
 		assert.Equal(t, values, config)
 		return nil
 	})
@@ -129,6 +131,7 @@ func newPreClusteringTx(t *testing.T) *sql.Tx {
 		preClusteringNodeSchema,
 		"INSERT INTO certificates VALUES (1, 'abcd:efgh', 1, 'foo', 'FOO')",
 		"INSERT INTO config VALUES(1, 'core.https_address', '1.2.3.4:666')",
+		"INSERT INTO config VALUES(2, 'core.trust_password', 'sekret')",
 		"INSERT INTO profiles VALUES(1, 'default', 'Default LXD profile')",
 		"INSERT INTO profiles VALUES(2, 'users', '')",
 		"INSERT INTO profiles_config VALUES(2, 2, 'boot.autostart', 'false')",

From b9fc325d8e40daab5ff39f02281f63537633f339 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 9 Jan 2018 11:49:22 +0000
Subject: [PATCH 143/227] Use the actual new node ID when migrating operations

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/membership.go | 3 ++-
 lxd/db/transaction.go     | 5 +++++
 2 files changed, 7 insertions(+), 1 deletion(-)

diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index f0a6cdbd0..33a8ce148 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -315,6 +315,7 @@ func Join(state *state.State, gateway *Gateway, cert *shared.CertInfo, name stri
 			return errors.Wrap(err, "failed to get ID of joining node")
 		}
 		state.Cluster.NodeID(node.ID)
+		tx.NodeID(node.ID)
 
 		// Storage pools.
 		ids, err := tx.StoragePoolIDs()
@@ -367,7 +368,7 @@ func Join(state *state.State, gateway *Gateway, cert *shared.CertInfo, name stri
 		for _, uuid := range operations {
 			_, err := tx.OperationAdd(uuid)
 			if err != nil {
-				return err
+				return errors.Wrapf(err, "failed to migrate operation %s", uuid)
 			}
 		}
 		return nil
diff --git a/lxd/db/transaction.go b/lxd/db/transaction.go
index 8220bf8d5..ad958325e 100644
--- a/lxd/db/transaction.go
+++ b/lxd/db/transaction.go
@@ -28,3 +28,8 @@ type ClusterTx struct {
 	tx     *sql.Tx // Handle to a transaction in the cluster dqlite database.
 	nodeID int64   // Node ID of this LXD instance.
 }
+
+// NodeID sets the node ID associated with this cluster transaction.
+func (c *ClusterTx) NodeID(id int64) {
+	c.nodeID = id
+}

From f163cfa96c7d0b51e574c73e34e6b6ae56d83592 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Sat, 13 Jan 2018 15:25:34 +0000
Subject: [PATCH 144/227] Handle GET container state for containers on other
 nodes as well

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/container_state.go | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/lxd/container_state.go b/lxd/container_state.go
index 62e328b15..c10ad41af 100644
--- a/lxd/container_state.go
+++ b/lxd/container_state.go
@@ -15,11 +15,20 @@ import (
 
 func containerState(d *Daemon, r *http.Request) Response {
 	name := mux.Vars(r)["name"]
-	c, err := containerLoadByName(d.State(), name)
+
+	// Handle requests targeted to a container on a different node
+	response, err := ForwardedResponseIfContainerIsRemote(d, r, name)
 	if err != nil {
 		return SmartError(err)
 	}
+	if response != nil {
+		return response
+	}
 
+	c, err := containerLoadByName(d.State(), name)
+	if err != nil {
+		return SmartError(err)
+	}
 	state, err := c.RenderState()
 	if err != nil {
 		return InternalError(err)

From 586a29866ba1f70c13348a56e997ca46fe3a7126 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 10 Jan 2018 08:22:53 +0000
Subject: [PATCH 145/227] Skip offline nodes when replying to an internal
 containers GET

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/containers_get.go | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/lxd/containers_get.go b/lxd/containers_get.go
index 7904b53fb..2ae662d05 100644
--- a/lxd/containers_get.go
+++ b/lxd/containers_get.go
@@ -82,6 +82,13 @@ func doContainersGet(d *Daemon, r *http.Request) (interface{}, error) {
 
 	wg := sync.WaitGroup{}
 	for address, containers := range result {
+		// If this is an internal request from another cluster node,
+		// ignore containers from other nodes, and return only the ones
+		// on this node
+		if isClusterNotification(r) && address != "" {
+			continue
+		}
+
 		// Mark containers on unavailable nodes as down
 		if recursion && address == "0.0.0.0" {
 			for _, container := range containers {
@@ -90,13 +97,6 @@ func doContainersGet(d *Daemon, r *http.Request) (interface{}, error) {
 			continue
 		}
 
-		// If this is an internal request from another cluster node,
-		// ignore containers from other nodes, and return only the ones
-		// on this node
-		if isClusterNotification(r) && address != "" {
-			continue
-		}
-
 		// For recursion requests we need to fetch the state of remote
 		// containers from their respective nodes.
 		if recursion && address != "" && !isClusterNotification(r) {

From 35c3b29cdaa8a2ad6f68130ad52b93dd3e3e9296 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 10 Jan 2018 09:37:33 +0000
Subject: [PATCH 146/227] Fix unit tests for lxc list

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxc/list.go      | 12 ++++++------
 lxc/list_test.go |  7 ++++---
 2 files changed, 10 insertions(+), 9 deletions(-)

diff --git a/lxc/list.go b/lxc/list.go
index 005df23a7..5abd59474 100644
--- a/lxc/list.go
+++ b/lxc/list.go
@@ -105,7 +105,7 @@ Pre-defined column shorthand chars:
 
 	t - Type (persistent or ephemeral)
 
-	N - Node hosting the container
+	H - Node hosting the container
 
 Custom columns are defined with "key[:name][:maxWidth]":
 
@@ -128,7 +128,7 @@ lxc list -c ns,user.comment:comment
 	List images with their running state and user comment. `)
 }
 
-const defaultColumns = "ns46tSN"
+const defaultColumns = "ns46tSNH"
 
 func (c *listCmd) flags() {
 	gnuflag.StringVar(&c.columnsRaw, "c", defaultColumns, i18n.G("Columns"))
@@ -488,15 +488,15 @@ func (c *listCmd) parseColumns(clustered bool) ([]column, error) {
 	}
 
 	if clustered {
-		columnsShorthandMap['N'] = column{
+		columnsShorthandMap['H'] = column{
 			i18n.G("NODE"), c.nodeColumnData, false, false}
 	} else {
 		if c.columnsRaw != defaultColumns {
-			if strings.ContainsAny(c.columnsRaw, "N") {
-				return nil, fmt.Errorf("Can't specify column N when not clustered")
+			if strings.ContainsAny(c.columnsRaw, "H") {
+				return nil, fmt.Errorf("Can't specify column H when not clustered")
 			}
 		}
-		c.columnsRaw = strings.Replace(c.columnsRaw, "N", "", -1)
+		c.columnsRaw = strings.Replace(c.columnsRaw, "H", "", -1)
 	}
 
 	columnList := strings.Split(c.columnsRaw, ",")
diff --git a/lxc/list_test.go b/lxc/list_test.go
index 915f985fd..5f01aa6e1 100644
--- a/lxc/list_test.go
+++ b/lxc/list_test.go
@@ -52,7 +52,7 @@ func TestShouldShow(t *testing.T) {
 }
 
 // Used by TestColumns and TestInvalidColumns
-const shorthand = "46abcdlnNpPsSt"
+const shorthand = "46abcdlnNpPsStH"
 const alphanum = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
 
 func TestColumns(t *testing.T) {
@@ -160,7 +160,8 @@ func TestColumns(t *testing.T) {
 
 			list := listCmd{columnsRaw: raw}
 
-			columns, err := list.parseColumns()
+			clustered := strings.Contains(raw, "H")
+			columns, err := list.parseColumns(clustered)
 			if err != nil {
 				t.Errorf("Failed to parse columns string.  Input: %s, Error: %s", raw, err)
 			}
@@ -174,7 +175,7 @@ func TestColumns(t *testing.T) {
 func TestInvalidColumns(t *testing.T) {
 	run := func(raw string) {
 		list := listCmd{columnsRaw: raw}
-		_, err := list.parseColumns()
+		_, err := list.parseColumns(true)
 		if err == nil {
 			t.Errorf("Expected error from parseColumns, received nil.  Input: %s", raw)
 		}

From 27b2ed978d6aa80b783670b79c1b1edd92791074 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 11 Jan 2018 11:23:08 +0000
Subject: [PATCH 147/227] Add --trace option to lxd to enable trace logging
 (e.g. for dqlite)

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/gateway.go      | 64 ++++++++++++++++++++++++++++-----------------
 lxd/cluster/gateway_test.go |  3 ++-
 lxd/cluster/options.go      | 38 +++++++++++++++++++++++++++
 lxd/daemon.go               | 14 +++++++++-
 lxd/main_args.go            |  3 +++
 lxd/main_args_test.go       |  3 +++
 lxd/main_daemon.go          |  1 +
 7 files changed, 100 insertions(+), 26 deletions(-)
 create mode 100644 lxd/cluster/options.go

diff --git a/lxd/cluster/gateway.go b/lxd/cluster/gateway.go
index ceec8f5d9..5b5ba3b4e 100644
--- a/lxd/cluster/gateway.go
+++ b/lxd/cluster/gateway.go
@@ -33,12 +33,19 @@ import (
 // After creation, the Daemon is expected to expose whatever http handlers the
 // HandlerFuncs method returns and to access the dqlite cluster using the gRPC
 // dialer returned by the Dialer method.
-func NewGateway(db *db.Node, cert *shared.CertInfo, latency float64) (*Gateway, error) {
+func NewGateway(db *db.Node, cert *shared.CertInfo, options ...Option) (*Gateway, error) {
 	ctx, cancel := context.WithCancel(context.Background())
+
+	o := newOptions()
+	for _, option := range options {
+		option(o)
+
+	}
+
 	gateway := &Gateway{
 		db:      db,
 		cert:    cert,
-		latency: latency,
+		options: o,
 		ctx:     ctx,
 		cancel:  cancel,
 	}
@@ -57,7 +64,7 @@ func NewGateway(db *db.Node, cert *shared.CertInfo, latency float64) (*Gateway,
 type Gateway struct {
 	db      *db.Node
 	cert    *shared.CertInfo
-	latency float64
+	options *options
 
 	// The raft instance to use for creating the dqlite driver. It's nil if
 	// this LXD node is not supposed to be part of the raft cluster.
@@ -364,7 +371,7 @@ func (g *Gateway) LeaderAddress() (string, error) {
 // Initialize the gateway, creating a new raft factory and gRPC server (if this
 // node is a database node), and a gRPC dialer.
 func (g *Gateway) init() error {
-	raft, err := newRaft(g.db, g.cert, g.latency)
+	raft, err := newRaft(g.db, g.cert, g.options.latency)
 	if err != nil {
 		return errors.Wrap(err, "failed to create raft factory")
 	}
@@ -373,7 +380,11 @@ func (g *Gateway) init() error {
 	// should serve as database node, so create a dqlite driver to be
 	// exposed it over gRPC.
 	if raft != nil {
-		driver, err := dqlite.NewDriver(raft.FSM(), raft.Raft(), dqlite.LogFunc(dqliteLog))
+		driver, err := dqlite.NewDriver(
+			raft.FSM(),
+			raft.Raft(),
+			dqlite.LogFunc(dqliteLog(g.options.logLevel)),
+			dqlite.LogLevel(g.options.logLevel))
 		if err != nil {
 			return errors.Wrap(err, "failed to create dqlite driver")
 		}
@@ -517,24 +528,29 @@ func grpcMemoryDial(dial func() net.Conn) func() (*grpc.ClientConn, error) {
 const grpcEndpoint = "/protocol.SQL/Conn"
 
 // Redirect dqlite's logs to our own logger
-func dqliteLog(level, message string) {
-	if level == "TRACE" {
-		// Ignore TRACE level.
-		//
-		// TODO: lxd has no TRACE level, which is quite verbose in dqlite,
-		//       we'll need to take this level into account if we need to
-		//       do some deep debugging.
-		return
-	}
-
-	switch level {
-	case "DEBUG":
-		logger.Debug(message)
-	case "INFO":
-		logger.Info(message)
-	case "WARN":
-		logger.Warn(message)
-	default:
-		// Ignore any other log level.
+func dqliteLog(configuredLevel string) func(level, message string) {
+	return func(level, message string) {
+		if level == "TRACE" {
+			// TODO: lxd has no TRACE level, so let's map it to
+			// DEBUG. However, ignore it altogether if the
+			// configured level is not TRACE, to save some CPU
+			// (since TRACE is quite verbose in dqlite).
+			if configuredLevel != "TRACE" {
+				return
+			}
+			level = "DEBUG"
+		}
+
+		message = fmt.Sprintf("DQLite: %s", message)
+		switch level {
+		case "DEBUG":
+			logger.Debug(message)
+		case "INFO":
+			logger.Info(message)
+		case "WARN":
+			logger.Warn(message)
+		default:
+			// Ignore any other log level.
+		}
 	}
 }
diff --git a/lxd/cluster/gateway_test.go b/lxd/cluster/gateway_test.go
index 48d074bca..bfefaf29a 100644
--- a/lxd/cluster/gateway_test.go
+++ b/lxd/cluster/gateway_test.go
@@ -147,7 +147,8 @@ func TestGateway_RaftNodesNotLeader(t *testing.T) {
 func newGateway(t *testing.T, db *db.Node, certInfo *shared.CertInfo) *cluster.Gateway {
 	logging.Testing(t)
 	require.NoError(t, os.Mkdir(filepath.Join(db.Dir(), "raft"), 0755))
-	gateway, err := cluster.NewGateway(db, certInfo, 0.2)
+	gateway, err := cluster.NewGateway(
+		db, certInfo, cluster.Latency(0.2), cluster.LogLevel("TRACE"))
 	require.NoError(t, err)
 	return gateway
 }
diff --git a/lxd/cluster/options.go b/lxd/cluster/options.go
new file mode 100644
index 000000000..500cfab0c
--- /dev/null
+++ b/lxd/cluster/options.go
@@ -0,0 +1,38 @@
+package cluster
+
+// Option to be passed to NewGateway to customize the resulting instance.
+type Option func(*options)
+
+// LogLevel sets the logging level for messages emitted by dqlite and raft.
+func LogLevel(level string) Option {
+	return func(options *options) {
+		options.logLevel = level
+	}
+
+}
+
+// Latency is a coarse-grained measure of how fast/reliable network links
+// are. This is used to tweak the various timeouts parameters of the raft
+// algorithm. See the raft.Config structure for more details. A value of 1.0
+// means use the default values from hashicorp's raft package. Values closer to
+// 0 reduce the values of the various timeouts (useful when running unit tests
+// in-memory).
+func Latency(latency float64) Option {
+	return func(options *options) {
+		options.latency = latency
+	}
+
+}
+
+// Create an options instance with default values.
+func newOptions() *options {
+	return &options{
+		latency:  1.0,
+		logLevel: "ERROR",
+	}
+}
+
+type options struct {
+	latency  float64
+	logLevel string
+}
diff --git a/lxd/daemon.go b/lxd/daemon.go
index dd01560a1..9bdf50514 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -79,6 +79,7 @@ type externalAuth struct {
 // DaemonConfig holds configuration values for Daemon.
 type DaemonConfig struct {
 	Group       string  // Group name the local unix socket should be chown'ed to
+	Trace       string  // Comma separated list of sub-systems to trace
 	RaftLatency float64 // Coarse grain measure of the cluster latency
 }
 
@@ -382,6 +383,9 @@ func (d *Daemon) init() error {
 			log.Ctx{"path": shared.VarPath("")})
 	}
 
+	/* List of sub-systems to trace */
+	trace := strings.Split(d.config.Trace, ",")
+
 	/* Initialize the operating system facade */
 	err = d.os.Init()
 	if err != nil {
@@ -401,7 +405,15 @@ func (d *Daemon) init() error {
 	}
 
 	/* Setup dqlite */
-	d.gateway, err = cluster.NewGateway(d.db, certInfo, d.config.RaftLatency)
+	clusterLogLevel := "ERROR"
+	if shared.StringInSlice("dqlite", trace) {
+		clusterLogLevel = "TRACE"
+	}
+	d.gateway, err = cluster.NewGateway(
+		d.db,
+		certInfo,
+		cluster.Latency(d.config.RaftLatency),
+		cluster.LogLevel(clusterLogLevel))
 	if err != nil {
 		return err
 	}
diff --git a/lxd/main_args.go b/lxd/main_args.go
index 96241c509..8f6b0848e 100644
--- a/lxd/main_args.go
+++ b/lxd/main_args.go
@@ -6,6 +6,7 @@ type Args struct {
 	Preseed              bool   `flag:"preseed"`
 	CPUProfile           string `flag:"cpuprofile"`
 	Debug                bool   `flag:"debug"`
+	Trace                string `flag:"trace"`
 	Group                string `flag:"group"`
 	Help                 bool   `flag:"help"`
 	Logfile              string `flag:"logfile"`
@@ -58,6 +59,8 @@ Commands:
 Common options:
     --debug
         Enable debug mode
+    --trace SUBSYSTEMS
+        Enable trace logging for the given comma-separated list of sub-systems (e.g. dqlite,raft)
     --help
         Print this help message
     --logfile FILE
diff --git a/lxd/main_args_test.go b/lxd/main_args_test.go
index bcb1fbc64..3dc5cd166 100644
--- a/lxd/main_args_test.go
+++ b/lxd/main_args_test.go
@@ -20,6 +20,7 @@ func TestParse_ArgsDefaults(t *testing.T) {
 	assert.Equal(t, false, args.Preseed)
 	assert.Equal(t, "", args.CPUProfile)
 	assert.Equal(t, false, args.Debug)
+	assert.Equal(t, "", args.Trace)
 	assert.Equal(t, "", args.Group)
 	assert.Equal(t, false, args.Help)
 	assert.Equal(t, "", args.Logfile)
@@ -49,6 +50,7 @@ func TestParse_ArgsCustom(t *testing.T) {
 		"--preseed",
 		"--cpuprofile", "lxd.cpu",
 		"--debug",
+		"--trace", "dqlite,raft",
 		"--group", "lxd",
 		"--help",
 		"--logfile", "lxd.log",
@@ -75,6 +77,7 @@ func TestParse_ArgsCustom(t *testing.T) {
 	assert.Equal(t, true, args.Preseed)
 	assert.Equal(t, "lxd.cpu", args.CPUProfile)
 	assert.Equal(t, true, args.Debug)
+	assert.Equal(t, "dqlite,raft", args.Trace)
 	assert.Equal(t, "lxd", args.Group)
 	assert.Equal(t, true, args.Help)
 	assert.Equal(t, "lxd.log", args.Logfile)
diff --git a/lxd/main_daemon.go b/lxd/main_daemon.go
index a4520315e..5a451d576 100644
--- a/lxd/main_daemon.go
+++ b/lxd/main_daemon.go
@@ -40,6 +40,7 @@ func cmdDaemon(args *Args) error {
 	}
 	c := DefaultDaemonConfig()
 	c.Group = args.Group
+	c.Trace = args.Trace
 	d := NewDaemon(c, sys.DefaultOS())
 	err = d.Init()
 	if err != nil {

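
Note: NewGateway itself changes outside this excerpt; the sketch below shows
how a functional-options constructor typically consumes these Option values.
The Gateway field names are illustrative, not the actual LXD code:

    func NewGateway(db *db.Node, cert *shared.CertInfo, options ...Option) (*Gateway, error) {
        o := newOptions() // defaults: latency 1.0, logLevel "ERROR"
        for _, option := range options {
            option(o) // each Option mutates the settings struct
        }
        return &Gateway{db: db, cert: cert, options: o}, nil
    }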
From afbb981dab63874a2053f271153caebe51ac0d68 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 11 Jan 2018 17:39:08 +0000
Subject: [PATCH 148/227] Add support for running lxc exec against containers
 on other nodes

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 client/interfaces.go      |  1 +
 client/lxd.go             |  7 ++++++
 lxd/container_exec.go     | 38 +++++++++++++++++++++++--------
 lxd/operations.go         | 58 +++++++++++++++++++++++++++++++++++++++++++++--
 test/suites/clustering.sh |  7 +++++-
 5 files changed, 98 insertions(+), 13 deletions(-)

diff --git a/client/interfaces.go b/client/interfaces.go
index ee44d282f..05836707d 100644
--- a/client/interfaces.go
+++ b/client/interfaces.go
@@ -178,6 +178,7 @@ type ContainerServer interface {
 	// Internal functions (for internal use)
 	RawQuery(method string, path string, data interface{}, queryETag string) (resp *api.Response, ETag string, err error)
 	RawWebsocket(path string) (conn *websocket.Conn, err error)
+	RawOperation(method string, path string, data interface{}, queryETag string) (op *Operation, ETag string, err error)
 }
 
 // The ConnectionInfo struct represents general information for a connection
diff --git a/client/lxd.go b/client/lxd.go
index 4421258eb..0ea3c62f1 100644
--- a/client/lxd.go
+++ b/client/lxd.go
@@ -113,6 +113,13 @@ func (r *ProtocolLXD) RawWebsocket(path string) (*websocket.Conn, error) {
 	return r.websocket(path)
 }
 
+// RawOperation allows direct creation of LXD API operations.
+//
+// This should only be used by internal LXD tools.
+func (r *ProtocolLXD) RawOperation(method string, path string, data interface{}, ETag string) (*Operation, string, error) {
+	return r.queryOperation(method, path, data, ETag)
+}
+
 // Internal functions
 func (r *ProtocolLXD) parseResponse(resp *http.Response) (*api.Response, string, error) {
 	// Get the ETag
diff --git a/lxd/container_exec.go b/lxd/container_exec.go
index 587312406..1a0fe0b3c 100644
--- a/lxd/container_exec.go
+++ b/lxd/container_exec.go
@@ -16,6 +16,8 @@ import (
 	"github.com/gorilla/mux"
 	"github.com/gorilla/websocket"
 
+	lxd "github.com/lxc/lxd/client"
+	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
 	"github.com/lxc/lxd/shared/logger"
@@ -331,6 +333,26 @@ func (s *execWs) Do(op *operation) error {
 
 func containerExecPost(d *Daemon, r *http.Request) Response {
 	name := mux.Vars(r)["name"]
+
+	post := api.ContainerExecPost{}
+	buf, err := ioutil.ReadAll(r.Body)
+	if err != nil {
+		return BadRequest(err)
+	}
+
+	if err := json.Unmarshal(buf, &post); err != nil {
+		return BadRequest(err)
+	}
+
+	cert := d.endpoints.NetworkCert()
+	client, err := cluster.ConnectIfContainerIsRemote(d.cluster, name, cert)
+	if err != nil {
+		return SmartError(err)
+	}
+	if client != nil {
+		return containerExecPostCluster(client, name, post)
+	}
+
 	c, err := containerLoadByName(d.State(), name)
 	if err != nil {
 		return SmartError(err)
@@ -344,16 +366,6 @@ func containerExecPost(d *Daemon, r *http.Request) Response {
 		return BadRequest(fmt.Errorf("Container is frozen."))
 	}
 
-	post := api.ContainerExecPost{}
-	buf, err := ioutil.ReadAll(r.Body)
-	if err != nil {
-		return BadRequest(err)
-	}
-
-	if err := json.Unmarshal(buf, &post); err != nil {
-		return BadRequest(err)
-	}
-
 	env := map[string]string{}
 
 	for k, v := range c.ExpandedConfig() {
@@ -494,3 +506,9 @@ func containerExecPost(d *Daemon, r *http.Request) Response {
 
 	return OperationResponse(op)
 }
+
+// Perform an exec request for a container running on a different cluster node.
+func containerExecPostCluster(client lxd.ContainerServer, name string, req api.ContainerExecPost) Response {
+	op, _, err := client.RawOperation("POST", fmt.Sprintf("/containers/%s/exec", name), req, "")
+	return ForwardedOperationResponse(&op.Operation)
+}
diff --git a/lxd/operations.go b/lxd/operations.go
index 46ae99597..65a0fef18 100644
--- a/lxd/operations.go
+++ b/lxd/operations.go
@@ -9,6 +9,7 @@ import (
 	"time"
 
 	"github.com/gorilla/mux"
+	"github.com/gorilla/websocket"
 	"github.com/pborman/uuid"
 	"github.com/pkg/errors"
 
@@ -605,14 +606,67 @@ func (r *operationWebSocket) String() string {
 	return md.ID
 }
 
+type forwardedOperationWebSocket struct {
+	req    *http.Request
+	id     string
+	source *websocket.Conn // Connection to the node where the operation is running
+}
+
+func (r *forwardedOperationWebSocket) Render(w http.ResponseWriter) error {
+	target, err := shared.WebsocketUpgrader.Upgrade(w, r.req, nil)
+	if err != nil {
+		return err
+	}
+	<-shared.WebsocketProxy(r.source, target)
+	return nil
+}
+
+func (r *forwardedOperationWebSocket) String() string {
+	return r.id
+}
+
 func operationAPIWebsocketGet(d *Daemon, r *http.Request) Response {
 	id := mux.Vars(r)["id"]
+
+	// First, check whether the websocket belongs to a local operation
+	// running on this node.
 	op, err := operationGet(id)
+	if err == nil {
+		return &operationWebSocket{r, op}
+	}
+
+	// Second, check whether the websocket belongs to an operation running
+	// on another node and, if so, proxy it.
+	secret := r.FormValue("secret")
+	if secret == "" {
+		return BadRequest(fmt.Errorf("missing secret"))
+	}
+
+	var address string
+	err = d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		operation, err := tx.OperationByUUID(id)
+		if err != nil {
+			return err
+		}
+		address = operation.NodeAddress
+		return nil
+	})
 	if err != nil {
-		return NotFound
+		return SmartError(err)
 	}
 
-	return &operationWebSocket{r, op}
+	cert := d.endpoints.NetworkCert()
+	client, err := cluster.Connect(address, cert, false)
+	if err != nil {
+		return SmartError(err)
+	}
+
+	logger.Debugf("Forward operation websocket from node %s", address)
+	source, err := client.GetOperationWebsocket(id, secret)
+	if err != nil {
+		return SmartError(err)
+	}
+	return &forwardedOperationWebSocket{req: r, id: id, source: source}
 }
 
 var operationWebsocket = Command{name: "operations/{id}/websocket", untrustedGet: true, get: operationAPIWebsocketGet}
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index f61d6294d..ab50ebdb0 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -143,10 +143,15 @@ test_clustering_containers() {
   # A Node: field indicates on which node the container is running
   LXD_DIR="${LXD_ONE_DIR}" lxc info foo | grep -q "Node: node2"
 
-  # Start and stop the container via node1
+  # Star an the container via node1
   LXD_DIR="${LXD_ONE_DIR}" lxc start foo
   LXD_DIR="${LXD_TWO_DIR}" lxc info foo | grep -q "Status: Running"
   LXD_DIR="${LXD_ONE_DIR}" lxc list | grep foo | grep -q RUNNING
+
+  # Exec a command in the container via node1
+  LXD_DIR="${LXD_TWO_DIR}" lxc exec foo ls / | grep -q linuxrc
+
+  # Stop the container via node1
   LXD_DIR="${LXD_ONE_DIR}" lxc stop foo
 
   # Create a container on node1 using the image that was stored on

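Note: a usage sketch for the new RawOperation entry point, written inside a
hypothetical handler (the request values are illustrative; the method and the
response wrapping come from the patch above):

    req := api.ContainerExecPost{
        Command:   []string{"ls", "/"},
        WaitForWS: true,
    }
    // POST to the exec endpoint of the node actually hosting the container,
    // then wrap the remote background operation, as containerExecPostCluster
    // does above.
    op, _, err := client.RawOperation("POST", "/containers/c1/exec", req, "")
    if err != nil {
        return SmartError(err)
    }
    return ForwardedOperationResponse(&op.Operation)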
From 56ac1ac120bee3b2392db8afa5f10b6d0e29fa06 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 11 Jan 2018 17:40:29 +0000
Subject: [PATCH 149/227] Disable dqlite checkpointing

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/gateway.go | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/lxd/cluster/gateway.go b/lxd/cluster/gateway.go
index 5b5ba3b4e..d00b08aea 100644
--- a/lxd/cluster/gateway.go
+++ b/lxd/cluster/gateway.go
@@ -384,7 +384,8 @@ func (g *Gateway) init() error {
 			raft.FSM(),
 			raft.Raft(),
 			dqlite.LogFunc(dqliteLog(g.options.logLevel)),
-			dqlite.LogLevel(g.options.logLevel))
+			dqlite.LogLevel(g.options.logLevel),
+			dqlite.AutoCheckpoint(10000000))
 		if err != nil {
 			return errors.Wrap(err, "failed to create dqlite driver")
 		}

From 1d1e3259b8e03cd7e542fc030ab053198eab545d Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 12 Jan 2018 07:33:14 +0000
Subject: [PATCH 150/227] Use sqlite build from Jenkins if available

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 test/includes/lxd.sh           | 16 +++++++++++-----
 test/suites/static_analysis.sh | 12 +++++++++---
 2 files changed, 20 insertions(+), 8 deletions(-)

diff --git a/test/includes/lxd.sh b/test/includes/lxd.sh
index ad88ea2aa..8d9e75b31 100644
--- a/test/includes/lxd.sh
+++ b/test/includes/lxd.sh
@@ -15,7 +15,10 @@ spawn_lxd() {
     shift
 
     # Link to local sqlite with replication patch for dqlite
-    sqlite="$(pwd)/../lxd/sqlite/.libs"
+    sqlite="$(pwd)/../lxd/sqlite"
+    if [ -e "/lxc-ci/build/cache/sqlite" ]; then
+	sqlite="/lxc-ci/build/cache/sqlite"
+    fi
 
     # shellcheck disable=SC2153
     if [ "$LXD_BACKEND" = "random" ]; then
@@ -41,10 +44,10 @@ spawn_lxd() {
     # shellcheck disable=SC2086
 
     if [ "${LXD_NETNS}" = "" ]; then
-	LD_LIBRARY_PATH="${sqlite}" LXD_DIR="${lxddir}" lxd --logfile "${lxddir}/lxd.log" "${DEBUG-}" "$@" 2>&1 &
+	LD_LIBRARY_PATH="${sqlite}/.libs" LXD_DIR="${lxddir}" lxd --logfile "${lxddir}/lxd.log" "${DEBUG-}" "$@" 2>&1 &
     else
 	pid="$(lxc-info -n "${LXD_NETNS}" -p | cut -f 2 -d : | tr -d " ")"
-	LD_LIBRARY_PATH="${sqlite}" LXD_DIR="${lxddir}" nsenter --all --target="${pid}" lxd --logfile "${lxddir}/lxd.log" "${DEBUG-}" "$@" 2>&1 &
+	LD_LIBRARY_PATH="${sqlite}/.libs" LXD_DIR="${lxddir}" nsenter --all --target="${pid}" lxd --logfile "${lxddir}/lxd.log" "${DEBUG-}" "$@" 2>&1 &
     fi
     LXD_PID=$!
     echo "${LXD_PID}" > "${lxddir}/lxd.pid"
@@ -96,11 +99,14 @@ respawn_lxd() {
     shift
 
     # Link to local sqlite with replication patch for dqlite
-    sqlite="$(pwd)/../lxd/sqlite/.libs"
+    sqlite="$(pwd)/../lxd/sqlite"
+    if [ -e "/lxc-ci/build/cache/sqlite" ]; then
+	sqlite="/lxc-ci/build/cache/sqlite"
+    fi
 
     echo "==> Spawning lxd in ${lxddir}"
     # shellcheck disable=SC2086
-    LD_LIBRARY_PATH="${sqlite}" LXD_DIR="${lxddir}" lxd --logfile "${lxddir}/lxd.log" "${DEBUG-}" "$@" 2>&1 &
+    LD_LIBRARY_PATH="${sqlite}/.libs" LXD_DIR="${lxddir}" lxd --logfile "${lxddir}/lxd.log" "${DEBUG-}" "$@" 2>&1 &
     LXD_PID=$!
     echo "${LXD_PID}" > "${lxddir}/lxd.pid"
     echo "==> Spawned LXD (PID is ${LXD_PID})"
diff --git a/test/suites/static_analysis.sh b/test/suites/static_analysis.sh
index d834d1495..98e3b65d3 100644
--- a/test/suites/static_analysis.sh
+++ b/test/suites/static_analysis.sh
@@ -22,9 +22,15 @@ test_static_analysis() {
     fi
 
     # Go static analysis
-    CGO_CFLAGS="-I$(pwd)/lxd/sqlite/"
-    CGO_LDFLAGS="-L$(pwd)/lxd/sqlite/.libs"
-    LD_LIBRARY_PATH="$(pwd)/lxd/sqlite/.libs"
+    sqlite="$(pwd)/lxd/sqlite"
+    if [ -e "/lxc-ci/build/cache/sqlite" ]; then
+	sqlite="/lxc-ci/build/cache/sqlite"
+	ls "/lxc-ci/build/cache/sqlite"
+    fi
+
+    CGO_CFLAGS="-I${sqlite}"
+    CGO_LDFLAGS="-L${sqlite}/.libs"
+    LD_LIBRARY_PATH="${sqlite}/.libs"
     export CGO_CFLAGS
     export CGO_LDFLAGS
     export LD_LIBRARY_PATH

From e1f0649e80f66c8513a6dd6a569c99b1d7388ec4 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 12 Jan 2018 07:58:19 +0000
Subject: [PATCH 151/227] Fix typos

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/events.go | 2 +-
 lxd/container.go      | 2 +-
 lxd/db/db.go          | 4 ++--
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/lxd/cluster/events.go b/lxd/cluster/events.go
index 9b72d1f20..710668587 100644
--- a/lxd/cluster/events.go
+++ b/lxd/cluster/events.go
@@ -12,7 +12,7 @@ import (
 	"golang.org/x/net/context"
 )
 
-// Events starts a task that continuosly monitors the list of cluster nodes and
+// Events starts a task that continuously monitors the list of cluster nodes and
 // maintains a pool of websocket connections against all of them, in order to
 // get notified about events.
 //
diff --git a/lxd/container.go b/lxd/container.go
index 6bd1eea87..ff16d9139 100644
--- a/lxd/container.go
+++ b/lxd/container.go
@@ -622,7 +622,7 @@ func containerCreateFromImage(d *Daemon, args db.ContainerArgs, hash string) (co
 	if nodeAddress != "" {
 		// The image is available from another node, let's try to
 		// import it.
-		logger.Debugf("Transfering image %s from node %s", hash, nodeAddress)
+		logger.Debugf("Transferring image %s from node %s", hash, nodeAddress)
 		client, err := cluster.Connect(nodeAddress, d.endpoints.NetworkCert(), false)
 		if err != nil {
 			return nil, err
diff --git a/lxd/db/db.go b/lxd/db/db.go
index 07e7fed3c..8746a0b0a 100644
--- a/lxd/db/db.go
+++ b/lxd/db/db.go
@@ -246,7 +246,7 @@ func (c *Cluster) EnterExclusive() {
 	c.mu.Lock()
 }
 
-// ExitExclusive runs the given transation and then releases the lock acquired
+// ExitExclusive runs the given transaction and then releases the lock acquired
 // with EnterExclusive.
 func (c *Cluster) ExitExclusive(f func(*ClusterTx) error) error {
 	defer c.mu.Unlock()
@@ -269,7 +269,7 @@ func (c *Cluster) transaction(f func(*ClusterTx) error) error {
 // NodeID sets the node NodeID associated with this cluster instance. It's used for
 // backward-compatibility of all db-related APIs that were written before
 // clustering and don't accept a node NodeID, so in those cases we automatically
-// use this value as implict node NodeID.
+// use this value as implicit node NodeID.
 func (c *Cluster) NodeID(id int64) {
 	c.nodeID = id
 }

From 8ec71c9a74389b5a0f71e201ec54edfbf9fd86c6 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 12 Jan 2018 08:13:07 +0000
Subject: [PATCH 152/227] Fix ineffectual assignments to err

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_cluster.go            | 3 +++
 lxd/cluster/heartbeat_test.go | 1 +
 lxd/container_exec.go         | 3 +++
 lxd/db/images_test.go         | 1 +
 lxd/main_activateifneeded.go  | 3 +++
 lxd/networks.go               | 6 +++---
 6 files changed, 14 insertions(+), 3 deletions(-)

diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index 42a82d02f..08cee2417 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -441,6 +441,9 @@ func clusterNodeGet(d *Daemon, r *http.Request) Response {
 		}
 		return nil
 	})
+	if err != nil {
+		return SmartError(err)
+	}
 
 	return SyncResponse(true, node)
 }
diff --git a/lxd/cluster/heartbeat_test.go b/lxd/cluster/heartbeat_test.go
index b40d4292e..52a7a3c05 100644
--- a/lxd/cluster/heartbeat_test.go
+++ b/lxd/cluster/heartbeat_test.go
@@ -136,6 +136,7 @@ func (f *heartbeatFixture) Grow() *cluster.Gateway {
 
 	nodes, err := cluster.Accept(
 		targetState, target, name, address, cluster.SchemaVersion, len(version.APIExtensions))
+	require.NoError(f.t, err)
 
 	err = cluster.Join(state, gateway, target.Cert(), name, nodes)
 	require.NoError(f.t, err)
diff --git a/lxd/container_exec.go b/lxd/container_exec.go
index 1a0fe0b3c..866398419 100644
--- a/lxd/container_exec.go
+++ b/lxd/container_exec.go
@@ -510,5 +510,8 @@ func containerExecPost(d *Daemon, r *http.Request) Response {
 // Perform an exec request for a container running on a different cluster node.
 func containerExecPostCluster(client lxd.ContainerServer, name string, req api.ContainerExecPost) Response {
 	op, _, err := client.RawOperation("POST", fmt.Sprintf("/containers/%s/exec", name), req, "")
+	if err != nil {
+		return SmartError(err)
+	}
 	return ForwardedOperationResponse(&op.Operation)
 }
diff --git a/lxd/db/images_test.go b/lxd/db/images_test.go
index c1f9b145d..e8038fd46 100644
--- a/lxd/db/images_test.go
+++ b/lxd/db/images_test.go
@@ -34,5 +34,6 @@ func TestImageLocate(t *testing.T) {
 	require.NoError(t, err)
 
 	address, err = cluster.ImageLocate("abc")
+	require.Equal(t, "", address)
 	require.EqualError(t, err, "image not available on any online node")
 }
diff --git a/lxd/main_activateifneeded.go b/lxd/main_activateifneeded.go
index 0cc37ae70..acb1bf0e9 100644
--- a/lxd/main_activateifneeded.go
+++ b/lxd/main_activateifneeded.go
@@ -74,6 +74,9 @@ func cmdActivateIfNeeded(args *Args) error {
 
 	d.cluster = db.ForLocalInspection(sqldb)
 	result, err := d.cluster.ContainersList(db.CTypeRegular)
+	if err != nil {
+		return err
+	}
 
 	for _, name := range result {
 		c, err := containerLoadByName(d.State(), name)
diff --git a/lxd/networks.go b/lxd/networks.go
index 7475018b0..1195a8dfa 100644
--- a/lxd/networks.go
+++ b/lxd/networks.go
@@ -427,9 +427,9 @@ func networkDelete(d *Daemon, r *http.Request) Response {
 		err = notifier(func(client lxd.ContainerServer) error {
 			return client.DeleteNetwork(name)
 		})
-	}
-	if err != nil {
-		return SmartError(err)
+		if err != nil {
+			return SmartError(err)
+		}
 	}
 
 	// Delete the network

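Note: the pattern these fixes target, in miniature, using the
main_activateifneeded.go case from the patch above (the two snippets are
alternatives, not sequential code):

    // Before: err is assigned but never inspected, so a failed query falls
    // through and result is used as if the call had succeeded.
    result, err := d.cluster.ContainersList(db.CTypeRegular)
    for _, name := range result {
        // ...
    }

    // After: check err before using result.
    result, err := d.cluster.ContainersList(db.CTypeRegular)
    if err != nil {
        return err
    }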
From 0dc4f5055466f43bd4a06d123b6b7fe9402b221f Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 12 Jan 2018 08:46:47 +0000
Subject: [PATCH 153/227] Fix activateifneeded integration tests needing
 LD_LIBRARY_PATH

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 test/suites/basic.sh | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/test/suites/basic.sh b/test/suites/basic.sh
index 45726e427..dfb3ad1af 100644
--- a/test/suites/basic.sh
+++ b/test/suites/basic.sh
@@ -249,6 +249,12 @@ test_basic_usage() {
   chmod +x "${LXD_ACTIVATION_DIR}"
   spawn_lxd "${LXD_ACTIVATION_DIR}" true
   (
+    # Link to local sqlite with replication patch for dqlite
+    sqlite="$(pwd)/../lxd/sqlite"
+    if [ -e "/lxc-ci/build/cache/sqlite" ]; then
+	sqlite="/lxc-ci/build/cache/sqlite"
+    fi
+
     set -e
     # shellcheck disable=SC2030
     LXD_DIR=${LXD_ACTIVATION_DIR}
@@ -259,7 +265,7 @@ test_basic_usage() {
     lxc init testimage autostart --force-local
     lxd activateifneeded --debug 2>&1 | grep -q -v "activating..."
     lxc config set autostart boot.autostart true --force-local
-    lxd activateifneeded --debug 2>&1 | grep -q "Daemon has auto-started containers, activating..."
+    LD_LIBRARY_PATH="${sqlite}/.libs" lxd activateifneeded --debug 2>&1 | grep -q "Daemon has auto-started containers, activating..."
 
     lxc config unset autostart boot.autostart --force-local
     lxd activateifneeded --debug 2>&1 | grep -q -v "activating..."
@@ -269,7 +275,7 @@ test_basic_usage() {
     shutdown_lxd "${LXD_DIR}"
     [ -d "/proc/${PID}" ] && false
 
-    lxd activateifneeded --debug 2>&1 | grep -q "Daemon has auto-started containers, activating..."
+    LD_LIBRARY_PATH="${sqlite}/.libs" lxd activateifneeded --debug 2>&1 | grep -q "Daemon has auto-started containers, activating..."
 
     # shellcheck disable=SC2031
     respawn_lxd "${LXD_DIR}"

From c05e58dbc5a31cba8e0d86e1e854ebd62ac6a9ec Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 12 Jan 2018 11:32:56 +0000
Subject: [PATCH 154/227] Pull, push and delete files from containers running
 on other nodes

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/container_file.go     |  9 +++++++++
 test/suites/clustering.sh | 20 ++++++++++++++++++--
 2 files changed, 27 insertions(+), 2 deletions(-)

diff --git a/lxd/container_file.go b/lxd/container_file.go
index a7f6974a8..516b2bfd5 100644
--- a/lxd/container_file.go
+++ b/lxd/container_file.go
@@ -15,6 +15,15 @@ import (
 
 func containerFileHandler(d *Daemon, r *http.Request) Response {
 	name := mux.Vars(r)["name"]
+
+	response, err := ForwardedResponseIfContainerIsRemote(d, r, name)
+	if err != nil {
+		return SmartError(err)
+	}
+	if response != nil {
+		return response
+	}
+
 	c, err := containerLoadByName(d.State(), name)
 	if err != nil {
 		return SmartError(err)
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index ab50ebdb0..55401cffb 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -143,13 +143,29 @@ test_clustering_containers() {
   # A Node: field indicates on which node the container is running
   LXD_DIR="${LXD_ONE_DIR}" lxc info foo | grep -q "Node: node2"
 
-  # Star an the container via node1
+  # Start the container via node1
   LXD_DIR="${LXD_ONE_DIR}" lxc start foo
   LXD_DIR="${LXD_TWO_DIR}" lxc info foo | grep -q "Status: Running"
   LXD_DIR="${LXD_ONE_DIR}" lxc list | grep foo | grep -q RUNNING
 
   # Exec a command in the container via node1
-  LXD_DIR="${LXD_TWO_DIR}" lxc exec foo ls / | grep -q linuxrc
+  LXD_DIR="${LXD_ONE_DIR}" lxc exec foo ls / | grep -q linuxrc
+
+  # Pull, push and delete files from the container via node1
+  ! LXD_DIR="${LXD_ONE_DIR}" lxc file pull foo/non-existing-file "${TEST_DIR}/non-existing-file"
+  mkdir "${TEST_DIR}/hello-world"
+  echo "hello world" > "${TEST_DIR}/hello-world/text"
+  LXD_DIR="${LXD_ONE_DIR}" lxc file push "${TEST_DIR}/hello-world/text" foo/hello-world-text
+  LXD_DIR="${LXD_ONE_DIR}" lxc file pull foo/hello-world-text "${TEST_DIR}/hello-world-text"
+  grep -q "hello world" "${TEST_DIR}/hello-world-text"
+  rm "${TEST_DIR}/hello-world-text"
+  LXD_DIR="${LXD_ONE_DIR}" lxc file push --recursive "${TEST_DIR}/hello-world" foo/
+  rm -r "${TEST_DIR}/hello-world"
+  LXD_DIR="${LXD_ONE_DIR}" lxc file pull --recursive foo/hello-world "${TEST_DIR}"
+  grep -q "hello world" "${TEST_DIR}/hello-world/text"
+  rm -r "${TEST_DIR}/hello-world"
+  LXD_DIR="${LXD_ONE_DIR}" lxc file delete foo/hello-world/text
+  ! LXD_DIR="${LXD_ONE_DIR}" lxc file pull foo/hello-world/text "${TEST_DIR}/hello-world-text"
 
   # Stop the container via node1
   LXD_DIR="${LXD_ONE_DIR}" lxc stop foo

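Note: ForwardedResponseIfContainerIsRemote is defined outside this excerpt
and is reused by the snapshot, rename, metadata and log patches that follow.
A plausible sketch of its shape, inferred from the call sites above (an
assumption, not the actual definition):

    func ForwardedResponseIfContainerIsRemote(d *Daemon, r *http.Request, name string) (Response, error) {
        cert := d.endpoints.NetworkCert()
        client, err := cluster.ConnectIfContainerIsRemote(d.cluster, name, cert)
        if err != nil {
            return nil, err
        }
        if client == nil {
            // The container is local: fall through to the normal handler.
            return nil, nil
        }
        return ForwardedResponse(client, r), nil
    }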
From cf8e58a55a742bd1501ad517d21b5c2144952a50 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 12 Jan 2018 15:39:53 +0000
Subject: [PATCH 155/227] Support snapshotting a container running on another
 node

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/container_snapshot.go | 9 +++++++++
 test/suites/clustering.sh | 4 ++++
 2 files changed, 13 insertions(+)

diff --git a/lxd/container_snapshot.go b/lxd/container_snapshot.go
index 13cb62412..e977a1d52 100644
--- a/lxd/container_snapshot.go
+++ b/lxd/container_snapshot.go
@@ -63,6 +63,15 @@ func containerSnapshotsGet(d *Daemon, r *http.Request) Response {
 func containerSnapshotsPost(d *Daemon, r *http.Request) Response {
 	name := mux.Vars(r)["name"]
 
+	// Handle requests targeted to a container on a different node
+	response, err := ForwardedResponseIfContainerIsRemote(d, r, name)
+	if err != nil {
+		return SmartError(err)
+	}
+	if response != nil {
+		return response
+	}
+
 	/*
 	 * snapshot is a three step operation:
 	 * 1. choose a new name
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 55401cffb..9f47b7764 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -170,6 +170,10 @@ test_clustering_containers() {
   # Stop the container via node1
   LXD_DIR="${LXD_ONE_DIR}" lxc stop foo
 
+  # Create a snapshot of the container via node1
+  LXD_DIR="${LXD_ONE_DIR}" lxc snapshot foo foo-bak
+  LXD_DIR="${LXD_ONE_DIR}" lxc info foo | grep -q foo-bak
+
   # Create a container on node1 using the image that was stored on
   # node2.
   LXD_DIR="${LXD_TWO_DIR}" lxc launch --target node1 testimage bar

From 8eb1d42c8a4f6b186f36f24eda69cb928bd55947 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 12 Jan 2018 16:07:30 +0000
Subject: [PATCH 156/227] Support deleting a snapshot of a container running on
 another node

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/container_snapshot.go | 8 ++++++++
 test/suites/clustering.sh | 4 +++-
 2 files changed, 11 insertions(+), 1 deletion(-)

diff --git a/lxd/container_snapshot.go b/lxd/container_snapshot.go
index e977a1d52..b1851eca9 100644
--- a/lxd/container_snapshot.go
+++ b/lxd/container_snapshot.go
@@ -147,6 +147,14 @@ func snapshotHandler(d *Daemon, r *http.Request) Response {
 	containerName := mux.Vars(r)["name"]
 	snapshotName := mux.Vars(r)["snapshotName"]
 
+	response, err := ForwardedResponseIfContainerIsRemote(d, r, containerName)
+	if err != nil {
+		return SmartError(err)
+	}
+	if response != nil {
+		return response
+	}
+
 	sc, err := containerLoadByName(
 		d.State(),
 		containerName+
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 9f47b7764..6953632b6 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -170,9 +170,11 @@ test_clustering_containers() {
   # Stop the container via node1
   LXD_DIR="${LXD_ONE_DIR}" lxc stop foo
 
-  # Create a snapshot of the container via node1
+  # Create and delete a snapshot of the container via node1
   LXD_DIR="${LXD_ONE_DIR}" lxc snapshot foo foo-bak
   LXD_DIR="${LXD_ONE_DIR}" lxc info foo | grep -q foo-bak
+  LXD_DIR="${LXD_ONE_DIR}" lxc delete foo/foo-bak
+  ! LXD_DIR="${LXD_ONE_DIR}" lxc info foo | grep -q foo-bak
 
   # Create a container on node1 using the image that was stored on
   # node2.

From 1c91658d4ffdcdfa509d6a3b569e97f31ab1f1ee Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 12 Jan 2018 16:09:20 +0000
Subject: [PATCH 157/227] Add /1.0/cluster urls to the API structure overview
 in rest-api.md

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 doc/rest-api.md | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/doc/rest-api.md b/doc/rest-api.md
index 1ff691a3c..43f9f1669 100644
--- a/doc/rest-api.md
+++ b/doc/rest-api.md
@@ -217,6 +217,9 @@ won't work and PUT needs to be used instead.
          * `/1.0/storage-pools/<name>/volumes`
            * `/1.0/storage-pools/<name>/volumes/<volume type>/<volume>`
      * `/1.0/resources`
+     * `/1.0/cluster`
+       * `/1.0/cluster/nodes`
+         * `/1.0/cluster/nodes/<name>`
 
 # API details
 ## `/`

From 5387c8968914b492d883910d565fcc2c73b4949a Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Sat, 13 Jan 2018 16:09:50 +0000
Subject: [PATCH 158/227] Support renaming/updating a container running on
 another node

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/container_get.go      | 10 ++++++++++
 lxd/container_patch.go    | 10 ++++++++++
 lxd/container_post.go     | 10 ++++++++++
 lxd/container_put.go      | 10 ++++++++++
 test/suites/clustering.sh | 12 +++++++++---
 5 files changed, 49 insertions(+), 3 deletions(-)

diff --git a/lxd/container_get.go b/lxd/container_get.go
index d8901b2a8..24e569588 100644
--- a/lxd/container_get.go
+++ b/lxd/container_get.go
@@ -8,6 +8,16 @@ import (
 
 func containerGet(d *Daemon, r *http.Request) Response {
 	name := mux.Vars(r)["name"]
+
+	// Handle requests targeted to a container on a different node
+	response, err := ForwardedResponseIfContainerIsRemote(d, r, name)
+	if err != nil {
+		return SmartError(err)
+	}
+	if response != nil {
+		return response
+	}
+
 	c, err := containerLoadByName(d.State(), name)
 	if err != nil {
 		return SmartError(err)
diff --git a/lxd/container_patch.go b/lxd/container_patch.go
index bb086e842..2b5a386db 100644
--- a/lxd/container_patch.go
+++ b/lxd/container_patch.go
@@ -19,6 +19,16 @@ import (
 func containerPatch(d *Daemon, r *http.Request) Response {
 	// Get the container
 	name := mux.Vars(r)["name"]
+
+	// Handle requests targeted to a container on a different node
+	response, err := ForwardedResponseIfContainerIsRemote(d, r, name)
+	if err != nil {
+		return SmartError(err)
+	}
+	if response != nil {
+		return response
+	}
+
 	c, err := containerLoadByName(d.State(), name)
 	if err != nil {
 		return NotFound
diff --git a/lxd/container_post.go b/lxd/container_post.go
index 7c18c5c38..3a7e49676 100644
--- a/lxd/container_post.go
+++ b/lxd/container_post.go
@@ -14,6 +14,16 @@ import (
 
 func containerPost(d *Daemon, r *http.Request) Response {
 	name := mux.Vars(r)["name"]
+
+	// Handle requests targeted to a container on a different node
+	response, err := ForwardedResponseIfContainerIsRemote(d, r, name)
+	if err != nil {
+		return SmartError(err)
+	}
+	if response != nil {
+		return response
+	}
+
 	c, err := containerLoadByName(d.State(), name)
 	if err != nil {
 		return SmartError(err)
diff --git a/lxd/container_put.go b/lxd/container_put.go
index 93cba1d0b..c0762a057 100644
--- a/lxd/container_put.go
+++ b/lxd/container_put.go
@@ -23,6 +23,16 @@ import (
 func containerPut(d *Daemon, r *http.Request) Response {
 	// Get the container
 	name := mux.Vars(r)["name"]
+
+	// Handle requests targeted to a container on a different node
+	response, err := ForwardedResponseIfContainerIsRemote(d, r, name)
+	if err != nil {
+		return SmartError(err)
+	}
+	if response != nil {
+		return response
+	}
+
 	c, err := containerLoadByName(d.State(), name)
 	if err != nil {
 		return NotFound
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 6953632b6..6f453830d 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -170,11 +170,17 @@ test_clustering_containers() {
   # Stop the container via node1
   LXD_DIR="${LXD_ONE_DIR}" lxc stop foo
 
-  # Create and delete a snapshot of the container via node1
+  # Rename the container via node1
+  LXD_DIR="${LXD_ONE_DIR}" lxc rename foo foo2
+  LXD_DIR="${LXD_TWO_DIR}" lxc list | grep -q foo2
+  LXD_DIR="${LXD_ONE_DIR}" lxc rename foo2 foo
+
+  # Create, rename and delete a snapshot of the container via node1
   LXD_DIR="${LXD_ONE_DIR}" lxc snapshot foo foo-bak
   LXD_DIR="${LXD_ONE_DIR}" lxc info foo | grep -q foo-bak
-  LXD_DIR="${LXD_ONE_DIR}" lxc delete foo/foo-bak
-  ! LXD_DIR="${LXD_ONE_DIR}" lxc info foo | grep -q foo-bak
+  LXD_DIR="${LXD_ONE_DIR}" lxc rename foo/foo-bak foo/foo-bak-2
+  LXD_DIR="${LXD_ONE_DIR}" lxc delete foo/foo-bak-2
+  ! LXD_DIR="${LXD_ONE_DIR}" lxc info foo | grep -q foo-bak-2
 
   # Create a container on node1 using the image that was stored on
   # node2.

From 03f461fea1ede2714f3ea1a704eff9cea29c6c42 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Sat, 13 Jan 2018 16:19:34 +0000
Subject: [PATCH 159/227] Metadata and template APIs targeting containers on
 other nodes

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/container_metadata.go | 50 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 50 insertions(+)

diff --git a/lxd/container_metadata.go b/lxd/container_metadata.go
index 5d8ab41c8..1fa5246cd 100644
--- a/lxd/container_metadata.go
+++ b/lxd/container_metadata.go
@@ -20,6 +20,16 @@ import (
 
 func containerMetadataGet(d *Daemon, r *http.Request) Response {
 	name := mux.Vars(r)["name"]
+
+	// Handle requests targeted to a container on a different node
+	response, err := ForwardedResponseIfContainerIsRemote(d, r, name)
+	if err != nil {
+		return SmartError(err)
+	}
+	if response != nil {
+		return response
+	}
+
 	c, err := containerLoadByName(d.State(), name)
 	if err != nil {
 		return SmartError(err)
@@ -53,6 +63,16 @@ func containerMetadataGet(d *Daemon, r *http.Request) Response {
 
 func containerMetadataPut(d *Daemon, r *http.Request) Response {
 	name := mux.Vars(r)["name"]
+
+	// Handle requests targeted to a container on a different node
+	response, err := ForwardedResponseIfContainerIsRemote(d, r, name)
+	if err != nil {
+		return SmartError(err)
+	}
+	if response != nil {
+		return response
+	}
+
 	c, err := containerLoadByName(d.State(), name)
 	if err != nil {
 		return SmartError(err)
@@ -87,6 +107,16 @@ func containerMetadataPut(d *Daemon, r *http.Request) Response {
 // Return a list of templates used in a container or the content of a template
 func containerMetadataTemplatesGet(d *Daemon, r *http.Request) Response {
 	name := mux.Vars(r)["name"]
+
+	// Handle requests targeted to a container on a different node
+	response, err := ForwardedResponseIfContainerIsRemote(d, r, name)
+	if err != nil {
+		return SmartError(err)
+	}
+	if response != nil {
+		return response
+	}
+
 	c, err := containerLoadByName(d.State(), name)
 	if err != nil {
 		return SmartError(err)
@@ -154,6 +184,16 @@ func containerMetadataTemplatesGet(d *Daemon, r *http.Request) Response {
 // Add a container template file
 func containerMetadataTemplatesPostPut(d *Daemon, r *http.Request) Response {
 	name := mux.Vars(r)["name"]
+
+	// Handle requests targeted to a container on a different node
+	response, err := ForwardedResponseIfContainerIsRemote(d, r, name)
+	if err != nil {
+		return SmartError(err)
+	}
+	if response != nil {
+		return response
+	}
+
 	c, err := containerLoadByName(d.State(), name)
 	if err != nil {
 		return SmartError(err)
@@ -197,6 +237,16 @@ func containerMetadataTemplatesPostPut(d *Daemon, r *http.Request) Response {
 // Delete a container template
 func containerMetadataTemplatesDelete(d *Daemon, r *http.Request) Response {
 	name := mux.Vars(r)["name"]
+
+	// Handle requests targeted to a container on a different node
+	response, err := ForwardedResponseIfContainerIsRemote(d, r, name)
+	if err != nil {
+		return SmartError(err)
+	}
+	if response != nil {
+		return response
+	}
+
 	c, err := containerLoadByName(d.State(), name)
 	if err != nil {
 		return SmartError(err)

From 66fb7a724f55071df0c43faae8fc3c59e09be2b6 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Sat, 13 Jan 2018 16:20:24 +0000
Subject: [PATCH 160/227] Support GET /containers/{name}/snapshots for remote
 containers

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/container_snapshot.go | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/lxd/container_snapshot.go b/lxd/container_snapshot.go
index b1851eca9..74cec56f8 100644
--- a/lxd/container_snapshot.go
+++ b/lxd/container_snapshot.go
@@ -18,13 +18,23 @@ import (
 )
 
 func containerSnapshotsGet(d *Daemon, r *http.Request) Response {
+	cname := mux.Vars(r)["name"]
+
+	// Handle requests targeted to a container on a different node
+	response, err := ForwardedResponseIfContainerIsRemote(d, r, cname)
+	if err != nil {
+		return SmartError(err)
+	}
+	if response != nil {
+		return response
+	}
+
 	recursionStr := r.FormValue("recursion")
 	recursion, err := strconv.Atoi(recursionStr)
 	if err != nil {
 		recursion = 0
 	}
 
-	cname := mux.Vars(r)["name"]
 	c, err := containerLoadByName(d.State(), cname)
 	if err != nil {
 		return SmartError(err)

From f3fd1c77c19d5dc99c4eaf7c74ae7eef224c36d3 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Sat, 13 Jan 2018 16:32:45 +0000
Subject: [PATCH 161/227] Support managing logs on containers running on other
 nodes

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/container_logs.go     | 29 +++++++++++++++++++++++++++++
 test/suites/clustering.sh |  3 +++
 2 files changed, 32 insertions(+)

diff --git a/lxd/container_logs.go b/lxd/container_logs.go
index a30c6b002..7e914c6db 100644
--- a/lxd/container_logs.go
+++ b/lxd/container_logs.go
@@ -23,6 +23,15 @@ func containerLogsGet(d *Daemon, r *http.Request) Response {
 	 */
 	name := mux.Vars(r)["name"]
 
+	// Handle requests targeted to a container on a different node
+	response, err := ForwardedResponseIfContainerIsRemote(d, r, name)
+	if err != nil {
+		return SmartError(err)
+	}
+	if response != nil {
+		return response
+	}
+
 	if err := containerValidName(name); err != nil {
 		return BadRequest(err)
 	}
@@ -63,6 +72,16 @@ func validLogFileName(fname string) bool {
 
 func containerLogGet(d *Daemon, r *http.Request) Response {
 	name := mux.Vars(r)["name"]
+
+	// Handle requests targeted to a container on a different node
+	response, err := ForwardedResponseIfContainerIsRemote(d, r, name)
+	if err != nil {
+		return SmartError(err)
+	}
+	if response != nil {
+		return response
+	}
+
 	file := mux.Vars(r)["file"]
 
 	if err := containerValidName(name); err != nil {
@@ -83,6 +102,16 @@ func containerLogGet(d *Daemon, r *http.Request) Response {
 
 func containerLogDelete(d *Daemon, r *http.Request) Response {
 	name := mux.Vars(r)["name"]
+
+	// Handle requests targeted to a container on a different node
+	response, err := ForwardedResponseIfContainerIsRemote(d, r, name)
+	if err != nil {
+		return SmartError(err)
+	}
+	if response != nil {
+		return response
+	}
+
 	file := mux.Vars(r)["file"]
 
 	if err := containerValidName(name); err != nil {
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 6f453830d..0bb2c4790 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -175,6 +175,9 @@ test_clustering_containers() {
   LXD_DIR="${LXD_TWO_DIR}" lxc list | grep -q foo2
   LXD_DIR="${LXD_ONE_DIR}" lxc rename foo2 foo
 
+  # Show lxc.log via node1
+  LXD_DIR="${LXD_ONE_DIR}" lxc info --show-log foo | grep -q Log
+
   # Create, rename and delete a snapshot of the container via node1
   LXD_DIR="${LXD_ONE_DIR}" lxc snapshot foo foo-bak
   LXD_DIR="${LXD_ONE_DIR}" lxc info foo | grep -q foo-bak

From cbaf70625dbca633d869ac714d8e5ea49b3183e0 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Sat, 13 Jan 2018 17:36:41 +0000
Subject: [PATCH 162/227] Support exporting images only available on other
 nodes

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/images.go             | 15 +++++++++++++++
 test/suites/clustering.sh |  4 ++++
 2 files changed, 19 insertions(+)

diff --git a/lxd/images.go b/lxd/images.go
index c6e361e3a..192b88e73 100644
--- a/lxd/images.go
+++ b/lxd/images.go
@@ -1510,6 +1510,21 @@ func imageExport(d *Daemon, r *http.Request) Response {
 		return NotFound
 	}
 
+	// Check if the image is only available on another node.
+	address, err := d.cluster.ImageLocate(imgInfo.Fingerprint)
+	if err != nil {
+		return SmartError(err)
+	}
+	if address != "" {
+		// Forward the request to the other node
+		cert := d.endpoints.NetworkCert()
+		client, err := cluster.Connect(address, cert, false)
+		if err != nil {
+			return SmartError(err)
+		}
+		return ForwardedResponse(client, r)
+	}
+
 	imagePath := shared.VarPath("images", imgInfo.Fingerprint)
 	rootfsPath := imagePath + ".rootfs"
 
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 0bb2c4790..f8cff92f9 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -185,6 +185,10 @@ test_clustering_containers() {
   LXD_DIR="${LXD_ONE_DIR}" lxc delete foo/foo-bak-2
   ! LXD_DIR="${LXD_ONE_DIR}" lxc info foo | grep -q foo-bak-2
 
+  # Export from node1 the image that was imported on node2
+  LXD_DIR="${LXD_ONE_DIR}" lxc image export testimage "${TEST_DIR}/testimage"
+  rm "${TEST_DIR}/testimage.tar.xz"
+
   # Create a container on node1 using the image that was stored on
   # node2.
   LXD_DIR="${LXD_TWO_DIR}" lxc launch --target node1 testimage bar

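Note: the contract of ImageLocate, as exercised here and in the earlier
images_test.go change (sketch; fingerprint is illustrative):

    address, err := d.cluster.ImageLocate(fingerprint)
    switch {
    case err != nil:
        // No online node has it: "image not available on any online node".
    case address == "":
        // The image is available locally: serve it from the local store.
    default:
        // Only another node has it: forward the request to that address.
    }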
From 900a2a61395d1999d6d78d52158fe1aeaa24f9bd Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 15 Jan 2018 10:07:53 +0000
Subject: [PATCH 163/227] Add cluster.offline_threshold configuration key

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 doc/server.md                  |  1 +
 lxd/api_cluster.go             | 11 ++++++++---
 lxd/cluster/config.go          | 27 +++++++++++++++++++++++++++
 lxd/cluster/config_test.go     | 15 +++++++++++++++
 lxd/cluster/events.go          | 16 ++++++++++++++--
 lxd/cluster/heartbeat.go       |  5 ++++-
 lxd/cluster/heartbeat_test.go  | 12 ++++++++++--
 lxd/cluster/membership.go      | 20 ++++++++++++++------
 lxd/cluster/membership_test.go |  3 ++-
 lxd/cluster/notify.go          |  7 ++++++-
 lxd/db/containers.go           |  7 ++++++-
 lxd/db/images.go               |  8 ++++++--
 lxd/db/node.go                 | 39 ++++++++++++++++++++++++++++++---------
 lxd/db/node_test.go            |  4 ++--
 test/suites/clustering.sh      |  6 ++++--
 15 files changed, 149 insertions(+), 32 deletions(-)

diff --git a/doc/server.md b/doc/server.md
index 8426246ab..a24668b58 100644
--- a/doc/server.md
+++ b/doc/server.md
@@ -10,6 +10,7 @@ currently supported:
 
 Key                             | Type      | Default   | API extension            | Description
 :--                             | :---      | :------   | :------------            | :----------
+cluster.offline\_threshold      | integer   | 20        | clustering               | Number of seconds after which an unresponsive node is considered offline
 core.https\_address             | string    | -         | -                        | Address to bind for the remote API
 core.https\_allowed\_credentials| boolean   | -         | -                        | Whether to set Access-Control-Allow-Credentials http header value to "true"
 core.https\_allowed\_headers    | string    | -         | -                        | Access-Control-Allow-Headers http header value
diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index 08cee2417..6b35e6153 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -380,7 +380,7 @@ func clusterNodesPostJoin(d *Daemon, req api.ClusterPost) Response {
 }
 
 func clusterNodesGet(d *Daemon, r *http.Request) Response {
-	dbNodes, flags, err := cluster.List(d.State())
+	dbNodes, flags, offlineThreshold, err := cluster.List(d.State())
 	if err != nil {
 		return SmartError(err)
 	}
@@ -390,7 +390,7 @@ func clusterNodesGet(d *Daemon, r *http.Request) Response {
 		nodes[i].Name = dbNode.Name
 		nodes[i].URL = fmt.Sprintf("https://%s", dbNode.Address)
 		nodes[i].Database = flags[dbNode.ID]
-		if dbNode.IsDown() {
+		if dbNode.IsOffline(offlineThreshold) {
 			nodes[i].State = "OFFLINE"
 		} else {
 			nodes[i].State = "ONLINE"
@@ -413,13 +413,18 @@ func clusterNodeGet(d *Daemon, r *http.Request) Response {
 	node.Name = name
 	address := ""
 	err := d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		offlineThreshold, err := tx.NodeOfflineThreshold()
+		if err != nil {
+			return err
+		}
+
 		dbNode, err := tx.NodeByName(name)
 		if err != nil {
 			return err
 		}
 		address = dbNode.Address
 		node.URL = fmt.Sprintf("https://%s", dbNode.Address)
-		if dbNode.IsDown() {
+		if dbNode.IsOffline(offlineThreshold) {
 			node.State = "OFFLINE"
 		} else {
 			node.State = "ONLINE"
diff --git a/lxd/cluster/config.go b/lxd/cluster/config.go
index 142b4e8ba..47a940db1 100644
--- a/lxd/cluster/config.go
+++ b/lxd/cluster/config.go
@@ -6,6 +6,7 @@ import (
 	"fmt"
 	"io"
 	"os/exec"
+	"strconv"
 	"time"
 
 	"golang.org/x/crypto/scrypt"
@@ -103,6 +104,14 @@ func (c *Config) MAASController() (string, string, string) {
 	return url, key, machine
 }
 
+// OfflineThreshold returns the configured heartbeat threshold, i.e. the
+// number of seconds after which an unresponsive node is considered
+// offline.
+func (c *Config) OfflineThreshold() time.Duration {
+	n := c.m.GetInt64("cluster.offline_threshold")
+	return time.Duration(n) * time.Second
+}
+
 // Dump current configuration keys and their values. Keys with values matching
 // their defaults are omitted.
 func (c *Config) Dump() map[string]interface{} {
@@ -192,6 +201,7 @@ func configGet(cluster *db.Cluster) (*Config, error) {
 
 // ConfigSchema defines available server configuration keys.
 var ConfigSchema = config.Schema{
+	"cluster.offline_threshold":      {Type: config.Int64, Default: offlineThresholdDefault(), Validator: offlineThresholdValidator},
 	"core.https_allowed_headers":     {},
 	"core.https_allowed_methods":     {},
 	"core.https_allowed_origin":      {},
@@ -220,6 +230,23 @@ var ConfigSchema = config.Schema{
 	"storage.zfs_use_refquota":     {Setter: deprecatedStorage, Type: config.Bool},
 }
 
+func offlineThresholdDefault() string {
+	return strconv.Itoa(db.DefaultOfflineThreshold)
+}
+
+func offlineThresholdValidator(value string) error {
+	// Ensure that the given value is greater than the heartbeat interval,
+	// which is the lower bound granularity of the offline check.
+	threshold, err := strconv.Atoi(value)
+	if err != nil {
+		return fmt.Errorf("offline threshold is not a number")
+	}
+	if threshold <= heartbeatInterval {
+		return fmt.Errorf("value must be greater than '%d'", heartbeatInterval)
+	}
+	return nil
+}
+
 func passwordSetter(value string) (string, error) {
 	// Nothing to do on unset
 	if value == "" {
diff --git a/lxd/cluster/config_test.go b/lxd/cluster/config_test.go
index ee67ac3d1..bc315aca6 100644
--- a/lxd/cluster/config_test.go
+++ b/lxd/cluster/config_test.go
@@ -18,6 +18,8 @@ func TestConfigLoad_Initial(t *testing.T) {
 
 	require.NoError(t, err)
 	assert.Equal(t, map[string]interface{}{}, config.Dump())
+
+	assert.Equal(t, float64(20), config.OfflineThreshold().Seconds())
 }
 
 // If the database contains invalid keys, they are ignored.
@@ -49,6 +51,19 @@ func TestConfigLoad_Triggers(t *testing.T) {
 	assert.Equal(t, map[string]interface{}{}, config.Dump())
 }
 
+// Offline threshold must be greater than the heartbeat interval.
+func TestConfigLoad_OfflineThresholdValidator(t *testing.T) {
+	tx, cleanup := db.NewTestClusterTx(t)
+	defer cleanup()
+
+	config, err := cluster.ConfigLoad(tx)
+	require.NoError(t, err)
+
+	_, err = config.Patch(map[string]interface{}{"cluster.offline_threshold": "2"})
+	require.EqualError(t, err, "cannot set 'cluster.offline_threshold' to '2': value must be greater than '3'")
+
+}
+
 // If some previously set values are missing from the ones passed to Replace(),
 // they are deleted from the configuration.
 func TestConfig_ReplaceDeleteValues(t *testing.T) {
diff --git a/lxd/cluster/events.go b/lxd/cluster/events.go
index 710668587..b9e0b4751 100644
--- a/lxd/cluster/events.go
+++ b/lxd/cluster/events.go
@@ -24,10 +24,22 @@ func Events(endpoints *endpoints.Endpoints, cluster *db.Cluster, f func(int64, i
 	update := func(ctx context.Context) {
 		// Get the current cluster nodes.
 		var nodes []db.NodeInfo
+		var offlineThreshold time.Duration
+
 		err := cluster.Transaction(func(tx *db.ClusterTx) error {
 			var err error
+
 			nodes, err = tx.Nodes()
-			return err
+			if err != nil {
+				return err
+			}
+
+			offlineThreshold, err = tx.NodeOfflineThreshold()
+			if err != nil {
+				return err
+			}
+
+			return nil
 		})
 		if err != nil {
 			logger.Warnf("Failed to get current cluster nodes: %v", err)
@@ -44,7 +56,7 @@ func Events(endpoints *endpoints.Endpoints, cluster *db.Cluster, f func(int64, i
 			ids[i] = int(node.ID)
 
 			// Don't bother trying to connect to offline nodes, or to ourselves.
-			if node.IsDown() || node.Address == address {
+			if node.IsOffline(offlineThreshold) || node.Address == address {
 				continue
 			}
 
diff --git a/lxd/cluster/heartbeat.go b/lxd/cluster/heartbeat.go
index 365f795d1..3d1d5e802 100644
--- a/lxd/cluster/heartbeat.go
+++ b/lxd/cluster/heartbeat.go
@@ -87,11 +87,14 @@ func Heartbeat(gateway *Gateway, cluster *db.Cluster) (task.Func, task.Schedule)
 		}
 	}
 
-	schedule := task.Every(3 * time.Second)
+	schedule := task.Every(time.Duration(heartbeatInterval) * time.Second)
 
 	return heartbeat, schedule
 }
 
+// Number of seconds to wait between two heartbeat rounds.
+const heartbeatInterval = 3
+
 // Perform a single heartbeat request against the node with the given address.
 func heartbeatNode(ctx context.Context, address string, cert *shared.CertInfo, raftNodes []db.RaftNode) error {
 	config, err := tlsClientConfig(cert)
diff --git a/lxd/cluster/heartbeat_test.go b/lxd/cluster/heartbeat_test.go
index 52a7a3c05..f4e3a5368 100644
--- a/lxd/cluster/heartbeat_test.go
+++ b/lxd/cluster/heartbeat_test.go
@@ -60,8 +60,12 @@ func TestHeartbeat(t *testing.T) {
 	err = state0.Cluster.Transaction(func(tx *db.ClusterTx) error {
 		nodes, err := tx.Nodes()
 		require.NoError(t, err)
+
+		offlineThreshold, err := tx.NodeOfflineThreshold()
+		require.NoError(t, err)
+
 		for _, node := range nodes {
-			assert.False(t, node.IsDown())
+			assert.False(t, node.IsOffline(offlineThreshold))
 		}
 		return nil
 	})
@@ -101,7 +105,11 @@ func TestHeartbeat_MarkAsDown(t *testing.T) {
 	err = state0.Cluster.Transaction(func(tx *db.ClusterTx) error {
 		nodes, err := tx.Nodes()
 		require.NoError(t, err)
-		assert.True(t, nodes[1].IsDown())
+
+		offlineThreshold, err := tx.NodeOfflineThreshold()
+		require.NoError(t, err)
+
+		assert.True(t, nodes[1].IsOffline(offlineThreshold))
 		return nil
 	})
 	require.NoError(t, err)
diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index 33a8ce148..b5cb880a7 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -462,9 +462,10 @@ func Leave(state *state.State, gateway *Gateway, name string, force bool) (strin
 
 // List the nodes of the cluster.
 //
-// Upon success return a list of the current nodes and a map that for each ID
-// tells if the node is part of the database cluster or not.
-func List(state *state.State) ([]db.NodeInfo, map[int64]bool, error) {
+// Upon success return a list of the current nodes, a map that for each ID
+// tells if the node is part of the database cluster or not, and the configured
+// offline threshold.
+func List(state *state.State) ([]db.NodeInfo, map[int64]bool, time.Duration, error) {
 	addresses := []string{} // Addresses of database nodes
 	err := state.Node.Transaction(func(tx *db.NodeTx) error {
 		nodes, err := tx.RaftNodes()
@@ -477,26 +478,33 @@ func List(state *state.State) ([]db.NodeInfo, map[int64]bool, error) {
 		return nil
 	})
 	if err != nil {
-		return nil, nil, err
+		return nil, nil, -1, err
 	}
 
 	var nodes []db.NodeInfo
+	var offlineThreshold time.Duration
+
 	err = state.Cluster.Transaction(func(tx *db.ClusterTx) error {
 		nodes, err = tx.Nodes()
 		if err != nil {
 			return err
 		}
+		offlineThreshold, err = tx.NodeOfflineThreshold()
+		if err != nil {
+			return err
+		}
+
 		return nil
 	})
 	if err != nil {
-		return nil, nil, err
+		return nil, nil, -1, err
 	}
 	flags := make(map[int64]bool) // Whether a node is a database node
 	for _, node := range nodes {
 		flags[node.ID] = shared.StringInSlice(node.Address, addresses)
 	}
 
-	return nodes, flags, nil
+	return nodes, flags, offlineThreshold, nil
 }
 
 // Count is a convenience for checking the current number of nodes in the
diff --git a/lxd/cluster/membership_test.go b/lxd/cluster/membership_test.go
index d1d86f4eb..9365bdccf 100644
--- a/lxd/cluster/membership_test.go
+++ b/lxd/cluster/membership_test.go
@@ -308,11 +308,12 @@ func TestJoin(t *testing.T) {
 	assert.Equal(t, address, raftNodes[1].Address)
 
 	// The List function returns all nodes in the cluster.
-	nodes, flags, err := cluster.List(state)
+	nodes, flags, offlineThreshold, err := cluster.List(state)
 	require.NoError(t, err)
 	assert.Len(t, nodes, 2)
 	assert.True(t, flags[1])
 	assert.True(t, flags[2])
+	assert.Equal(t, float64(20), offlineThreshold.Seconds())
 
 	// The Count function returns the number of nodes.
 	count, err := cluster.Count(state)
diff --git a/lxd/cluster/notify.go b/lxd/cluster/notify.go
index 7cdbb1766..6c2042455 100644
--- a/lxd/cluster/notify.go
+++ b/lxd/cluster/notify.go
@@ -44,6 +44,11 @@ func NewNotifier(state *state.State, cert *shared.CertInfo, policy NotifierPolic
 
 	peers := []string{}
 	err = state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		offlineThreshold, err := tx.NodeOfflineThreshold()
+		if err != nil {
+			return err
+		}
+
 		nodes, err := tx.Nodes()
 		if err != nil {
 			return err
@@ -52,7 +57,7 @@ func NewNotifier(state *state.State, cert *shared.CertInfo, policy NotifierPolic
 			if node.Address == address || node.Address == "0.0.0.0" {
 				continue // Exclude ourselves
 			}
-			if node.IsDown() {
+			if node.IsOffline(offlineThreshold) {
 				switch policy {
 				case NotifyAll:
 					return fmt.Errorf("peer node %s is down", node.Address)
diff --git a/lxd/db/containers.go b/lxd/db/containers.go
index c97667376..200791101 100644
--- a/lxd/db/containers.go
+++ b/lxd/db/containers.go
@@ -92,6 +92,11 @@ SELECT nodes.id, nodes.address
 //
 // Containers whose node is down are added to the special address "0.0.0.0".
 func (c *ClusterTx) ContainersListByNodeAddress() (map[string][]string, error) {
+	offlineThreshold, err := c.NodeOfflineThreshold()
+	if err != nil {
+		return nil, err
+	}
+
 	stmt := `
 SELECT containers.name, nodes.id, nodes.address, nodes.heartbeat
   FROM containers JOIN nodes ON nodes.id = containers.node_id
@@ -117,7 +122,7 @@ SELECT containers.name, nodes.id, nodes.address, nodes.heartbeat
 		}
 		if nodeID == c.nodeID {
 			nodeAddress = ""
-		} else if nodeIsDown(nodeHeartbeat) {
+		} else if nodeIsOffline(offlineThreshold, nodeHeartbeat) {
 			nodeAddress = "0.0.0.0"
 		}
 		result[nodeAddress] = append(result[nodeAddress], name)
diff --git a/lxd/db/images.go b/lxd/db/images.go
index 77b7eb1bb..c9138afb9 100644
--- a/lxd/db/images.go
+++ b/lxd/db/images.go
@@ -320,7 +320,11 @@ WHERE images.fingerprint = ?
 	var addresses []string  // Addresses of online nodes with the image
 
 	err := c.Transaction(func(tx *ClusterTx) error {
-		var err error
+		offlineThreshold, err := tx.NodeOfflineThreshold()
+		if err != nil {
+			return err
+		}
+
 		localAddress, err = tx.NodeAddress()
 		if err != nil {
 			return err
@@ -334,7 +338,7 @@ WHERE images.fingerprint = ?
 			if err != nil {
 				return err
 			}
-			if address != localAddress && node.IsDown() {
+			if address != localAddress && node.IsOffline(offlineThreshold) {
 				continue
 			}
 			addresses = append(addresses, address)
diff --git a/lxd/db/node.go b/lxd/db/node.go
index e4d393360..d49cc4505 100644
--- a/lxd/db/node.go
+++ b/lxd/db/node.go
@@ -2,6 +2,7 @@ package db
 
 import (
 	"fmt"
+	"strconv"
 	"time"
 
 	"github.com/lxc/lxd/lxd/db/cluster"
@@ -21,10 +22,10 @@ type NodeInfo struct {
 	Heartbeat     time.Time // Timestamp of the last heartbeat
 }
 
-// IsDown returns true if the last heartbeat time of the node is older than 20
-// seconds.
-func (n NodeInfo) IsDown() bool {
-	return nodeIsDown(n.Heartbeat)
+// IsOffline returns true if the last successful heartbeat time of the node is
+// older than the given threshold.
+func (n NodeInfo) IsOffline(threshold time.Duration) bool {
+	return nodeIsOffline(threshold, n.Heartbeat)
 }
 
 // NodeByAddress returns the node with the given network address.
@@ -263,10 +264,30 @@ func (c *ClusterTx) NodeClear(id int64) error {
 	return nil
 }
 
-func nodeIsDown(heartbeat time.Time) bool {
-	return heartbeat.Before(time.Now().Add(-time.Duration(nodeDownThreshold) * time.Second))
+// NodeOfflineThreshold returns the amount of time that needs to elapse after
+// which a series of unsuccessful heartbeats will make the node be considered
+// offline.
+func (c *ClusterTx) NodeOfflineThreshold() (time.Duration, error) {
+	threshold := time.Duration(DefaultOfflineThreshold) * time.Second
+	values, err := query.SelectStrings(
+		c.tx, "SELECT value FROM config WHERE key='cluster.offline_threshold'")
+	if err != nil {
+		return -1, err
+	}
+	if len(values) > 0 {
+		seconds, err := strconv.Atoi(values[0])
+		if err != nil {
+			return -1, err
+		}
+		threshold = time.Duration(seconds) * time.Second
+	}
+	return threshold, nil
+}
+
+func nodeIsOffline(threshold time.Duration, heartbeat time.Time) bool {
+	return heartbeat.Before(time.Now().Add(-threshold))
 }
 
-// How many seconds to wait before considering a node offline after no
-// heartbeat was received.
-var nodeDownThreshold = 20
+// DefaultOfflineThreshold is the default value for the
+// cluster.offline_threshold configuration key, expressed in seconds.
+const DefaultOfflineThreshold = 20
diff --git a/lxd/db/node_test.go b/lxd/db/node_test.go
index 439240b14..33e152f20 100644
--- a/lxd/db/node_test.go
+++ b/lxd/db/node_test.go
@@ -30,7 +30,7 @@ func TestNodeAdd(t *testing.T) {
 	assert.Equal(t, "1.2.3.4:666", node.Address)
 	assert.Equal(t, cluster.SchemaVersion, node.Schema)
 	assert.Equal(t, len(version.APIExtensions), node.APIExtensions)
-	assert.False(t, node.IsDown())
+	assert.False(t, node.IsOffline(20*time.Second))
 
 	node, err = tx.NodeByName("buzz")
 	require.NoError(t, err)
@@ -120,7 +120,7 @@ func TestNodeHeartbeat(t *testing.T) {
 	require.Len(t, nodes, 2)
 
 	node := nodes[1]
-	assert.True(t, node.IsDown())
+	assert.True(t, node.IsOffline(20*time.Second))
 }
 
 // A node is considered empty only if it has no containers and no images.
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index f8cff92f9..f8eaa1976 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -68,8 +68,9 @@ test_clustering_membership() {
 
   # Shutdown a non-database node, and wait a few seconds so it will be
   # detected as down.
+  LXD_DIR="${LXD_FOUR_DIR}" lxc config set cluster.offline_threshold 4
   LXD_DIR="${LXD_FIVE_DIR}" lxd shutdown
-  sleep 22
+  sleep 6
   LXD_DIR="${LXD_THREE_DIR}" lxc cluster list | grep "node5" | grep -q "OFFLINE"
 
   # Trying to delete the preseeded network now fails, because a node is degraded.
@@ -202,8 +203,9 @@ test_clustering_containers() {
 
   # Shutdown node 2, wait for it to be considered offline, and list
   # containers.
+  LXD_DIR="${LXD_THREE_DIR}" lxc config set cluster.offline_threshold 4
   LXD_DIR="${LXD_TWO_DIR}" lxd shutdown
-  sleep 22
+  sleep 6
   LXD_DIR="${LXD_ONE_DIR}" lxc list | grep foo | grep -q ERROR
 
   LXD_DIR="${LXD_THREE_DIR}" lxd shutdown

From 4b0b0ef3c34bf862f09609fd17c5794a0240db45 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 15 Jan 2018 15:21:03 +0000
Subject: [PATCH 164/227] Make the cluster.Events task non-blocking when it
 stops

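As a side note for reviewers, the pattern used here can be sketched in
isolation like this (illustrative names, not the actual LXD API); note
that closing the channel, rather than sending on it, avoids leaking the
goroutine when the stop signal wins the race:

    package main

    import (
        "context"
        "fmt"
        "time"
    )

    // runInterruptible wraps a blocking function so the caller returns as
    // soon as the context is cancelled, even if the function is still running.
    func runInterruptible(ctx context.Context, blocking func()) {
        ch := make(chan struct{})
        go func() {
            blocking()
            close(ch)
        }()
        select {
        case <-ch: // the blocking work finished normally
        case <-ctx.Done(): // we were asked to stop; don't wait any longer
        }
    }

    func main() {
        ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
        defer cancel()
        runInterruptible(ctx, func() { time.Sleep(time.Second) })
        fmt.Println("returned without waiting for the full second")
    }
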
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/events.go | 125 ++++++++++++++++++++++++++++----------------------
 1 file changed, 70 insertions(+), 55 deletions(-)

diff --git a/lxd/cluster/events.go b/lxd/cluster/events.go
index b9e0b4751..f01f0b0e2 100644
--- a/lxd/cluster/events.go
+++ b/lxd/cluster/events.go
@@ -20,79 +20,94 @@ import (
 func Events(endpoints *endpoints.Endpoints, cluster *db.Cluster, f func(int64, interface{})) (task.Func, task.Schedule) {
 	listeners := map[int64]*lxd.EventListener{}
 
-	// Update our pool of event listeners.
+	// Update our pool of event listeners. Since database queries are
+	// blocking, we spawn the actual logic in a goroutine, so we can abort
+	// immediately when we receive the stop signal.
 	update := func(ctx context.Context) {
-		// Get the current cluster nodes.
-		var nodes []db.NodeInfo
-		var offlineThreshold time.Duration
+		ch := make(chan struct{})
+		go func() {
+			eventsUpdateListeners(endpoints, cluster, listeners, f)
+			ch <- struct{}{}
+		}()
+		select {
+		case <-ch:
+		case <-ctx.Done():
+		}
 
-		err := cluster.Transaction(func(tx *db.ClusterTx) error {
-			var err error
+	}
 
-			nodes, err = tx.Nodes()
-			if err != nil {
-				return err
-			}
+	schedule := task.Every(time.Second)
 
-			offlineThreshold, err = tx.NodeOfflineThreshold()
-			if err != nil {
-				return err
-			}
+	return update, schedule
+}
 
-			return nil
-		})
+func eventsUpdateListeners(endpoints *endpoints.Endpoints, cluster *db.Cluster, listeners map[int64]*lxd.EventListener, f func(int64, interface{})) {
+	// Get the current cluster nodes.
+	var nodes []db.NodeInfo
+	var offlineThreshold time.Duration
+
+	err := cluster.Transaction(func(tx *db.ClusterTx) error {
+		var err error
+
+		nodes, err = tx.Nodes()
 		if err != nil {
-			logger.Warnf("Failed to get current cluster nodes: %v", err)
-			return
+			return err
 		}
-		if len(nodes) == 1 {
-			return // Either we're not clustered or this is a single-node cluster
+
+		offlineThreshold, err = tx.NodeOfflineThreshold()
+		if err != nil {
+			return err
 		}
 
-		address := endpoints.NetworkAddress()
+		return nil
+	})
+	if err != nil {
+		logger.Warnf("Failed to get current cluster nodes: %v", err)
+		return
+	}
+	if len(nodes) == 1 {
+		return // Either we're not clustered or this is a single-node cluster
+	}
+
+	address := endpoints.NetworkAddress()
 
-		ids := make([]int, len(nodes))
-		for i, node := range nodes {
-			ids[i] = int(node.ID)
+	ids := make([]int, len(nodes))
+	for i, node := range nodes {
+		ids[i] = int(node.ID)
 
-			// Don't bother trying to connect to offline nodes, or to ourselves.
-			if node.IsOffline(offlineThreshold) || node.Address == address {
-				continue
-			}
+		// Don't bother trying to connect to offline nodes, or to ourselves.
+		if node.IsOffline(offlineThreshold) || node.Address == address {
+			continue
+		}
 
-			_, ok := listeners[node.ID]
-
-			// The node has already a listener associated to it.
-			if ok {
-				// Double check that the listener is still
-				// connected. If it is, just move on, other
-				// we'll try to connect again.
-				if listeners[node.ID].Active() {
-					continue
-				}
-				delete(listeners, node.ID)
-			}
+		_, ok := listeners[node.ID]
 
-			listener, err := eventsConnect(node.Address, endpoints.NetworkCert())
-			if err != nil {
-				logger.Warnf("Failed to get events from node %s: %v", node.Address, err)
+		// The node already has a listener associated with it.
+		if ok {
+			// Double check that the listener is still
+			// connected. If it is, just move on, otherwise
+			// we'll try to connect again.
+			if listeners[node.ID].Active() {
 				continue
 			}
-			logger.Debugf("Listening for events on node %s", node.Address)
-			listener.AddHandler(nil, func(event interface{}) { f(node.ID, event) })
-			listeners[node.ID] = listener
+			delete(listeners, node.ID)
 		}
-		for id, listener := range listeners {
-			if !shared.IntInSlice(int(id), ids) {
-				listener.Disconnect()
-				delete(listeners, id)
-			}
+
+		listener, err := eventsConnect(node.Address, endpoints.NetworkCert())
+		if err != nil {
+			logger.Warnf("Failed to get events from node %s: %v", node.Address, err)
+			continue
+		}
+		logger.Debugf("Listening for events on node %s", node.Address)
+		listener.AddHandler(nil, func(event interface{}) { f(node.ID, event) })
+		listeners[node.ID] = listener
+	}
+	for id, listener := range listeners {
+		if !shared.IntInSlice(int(id), ids) {
+			listener.Disconnect()
+			delete(listeners, id)
 		}
 	}
-
-	schedule := task.Every(time.Second)
-
-	return update, schedule
 }
 
 // Establish a client connection to get events from the given node.

From bbb723252b7d22bae121cf65ae7eebdc6b438003 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 15 Jan 2018 15:22:15 +0000
Subject: [PATCH 165/227] Improve error message when tasks don't stop within
 the timeout

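The bookkeeping behind the new error message can be sketched standalone
as follows (a minimal illustrative version; the real task.Group guards
its running map with the same mutex pattern):

    package main

    import (
        "fmt"
        "sort"
        "strconv"
        "strings"
        "sync"
    )

    // tracker records which task indexes are still running, so a timeout
    // error can name them instead of just saying "context deadline exceeded".
    type tracker struct {
        mu      sync.Mutex
        running map[int]bool
    }

    func (t *tracker) set(i int, v bool) {
        t.mu.Lock()
        defer t.mu.Unlock()
        t.running[i] = v
    }

    func (t *tracker) err() error {
        t.mu.Lock()
        defer t.mu.Unlock()
        running := []string{}
        for i, v := range t.running {
            if v {
                running = append(running, strconv.Itoa(i))
            }
        }
        sort.Strings(running) // map iteration order is random
        return fmt.Errorf("tasks %s are still running", strings.Join(running, ", "))
    }

    func main() {
        t := &tracker{running: map[int]bool{}}
        t.set(0, true)
        t.set(1, false)
        fmt.Println(t.err()) // tasks 0 are still running
    }
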
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/task/group.go      | 28 ++++++++++++++++++++++------
 lxd/task/group_test.go |  2 +-
 2 files changed, 23 insertions(+), 7 deletions(-)

diff --git a/lxd/task/group.go b/lxd/task/group.go
index ff6531f61..b8b70b306 100644
--- a/lxd/task/group.go
+++ b/lxd/task/group.go
@@ -1,6 +1,9 @@
 package task
 
 import (
+	"fmt"
+	"strconv"
+	"strings"
 	"sync"
 	"time"
 
@@ -11,9 +14,11 @@ import (
 //
 // All tasks in a group will be started and stopped at the same time.
 type Group struct {
-	cancel func()
-	wg     sync.WaitGroup
-	tasks  []Task
+	cancel  func()
+	wg      sync.WaitGroup
+	tasks   []Task
+	running map[int]bool
+	mu      sync.Mutex
 }
 
 // Add a new task to the group, returning its index.
@@ -32,12 +37,17 @@ func (g *Group) Start() {
 	ctx := context.Background()
 	ctx, g.cancel = context.WithCancel(ctx)
 	g.wg.Add(len(g.tasks))
+	g.running = make(map[int]bool)
 	for i := range g.tasks {
 		task := g.tasks[i] // Local variable for the closure below.
-		go func() {
+		g.running[i] = true
+		go func(i int) {
 			task.loop(ctx)
 			g.wg.Done()
-		}()
+			g.mu.Lock()
+			defer g.mu.Unlock()
+			g.running[i] = false
+		}(i)
 	}
 }
 
@@ -73,7 +83,13 @@ func (g *Group) Stop(timeout time.Duration) error {
 	defer cancel()
 	select {
 	case <-ctx.Done():
-		return ctx.Err()
+		running := []string{}
+		for i, value := range g.running {
+			if value {
+				running = append(running, strconv.Itoa(i))
+			}
+		}
+		return fmt.Errorf("tasks %s are still running", strings.Join(running, ", "))
 	case <-graceful:
 		return nil
 
diff --git a/lxd/task/group_test.go b/lxd/task/group_test.go
index 365f49b77..3f93c7e2a 100644
--- a/lxd/task/group_test.go
+++ b/lxd/task/group_test.go
@@ -37,7 +37,7 @@ func TestGroup_StopUngracefully(t *testing.T) {
 
 	assertRecv(t, ok)
 
-	assert.Equal(t, context.DeadlineExceeded, group.Stop(time.Millisecond))
+	assert.EqualError(t, group.Stop(time.Millisecond), "tasks 0 are still running")
 }
 
 // Assert that the given channel receives an object within a second.

From 3f94e87086e1aed64fb1e1fa9f2ef2e7f5ecba3a Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 15 Jan 2018 15:24:16 +0000
Subject: [PATCH 166/227] Log a debug message if the gRPC db fails closing,
 instead of warning

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/daemon.go | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/lxd/daemon.go b/lxd/daemon.go
index 9bdf50514..8b4ec5312 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -638,20 +638,22 @@ func (d *Daemon) Stop() error {
 		case <-time.After(2 * time.Second):
 			shouldUnmount = true
 		}
+
 		logger.Infof("Closing the database")
-		trackError(d.db.Close())
-	}
-	if d.cluster != nil {
 		err := d.cluster.Close()
 		// If we got io.EOF the network connection was interrupted and
-		// it's likely that the other node shutdown. Let's just log a
-		// warning.
+		// it's likely that the other node shutdown. Let's just log the
+		// event and return cleanly.
 		if errors.Cause(err) == driver.ErrBadConn {
-			logger.Warnf("Could not close remote database: %v", err)
+			logger.Debugf("Could not close remote database cleanly: %v", err)
 		} else {
 			trackError(err)
 		}
 	}
+	if d.db != nil {
+		trackError(d.db.Close())
+	}
+
 	if d.gateway != nil {
 		trackError(d.gateway.Shutdown())
 	}

From 04f7a5c65d78fb6a9d053c6b933b78bdeae77122 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 15 Jan 2018 15:55:05 +0000
Subject: [PATCH 167/227] Extract compareVersions into the lxd/util sub-package

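For reference, the extracted helper's semantics can be exercised with a
small standalone program (the function body below is copied from the
diff, only renamed):

    package main

    import "fmt"

    // compareVersions mirrors util.CompareVersions: a version is the pair
    // [schema, apiExtensions]. It returns 0 when equal, 1 when the first is
    // greater, 2 when the second is greater, and an error when the two
    // components disagree (e.g. newer schema but fewer API extensions).
    func compareVersions(version1, version2 [2]int) (int, error) {
        schema1, extensions1 := version1[0], version1[1]
        schema2, extensions2 := version2[0], version2[1]

        if schema1 == schema2 && extensions1 == extensions2 {
            return 0, nil
        }
        if schema1 >= schema2 && extensions1 >= extensions2 {
            return 1, nil
        }
        if schema1 <= schema2 && extensions1 <= extensions2 {
            return 2, nil
        }
        return -1, fmt.Errorf("nodes have inconsistent versions")
    }

    func main() {
        fmt.Println(compareVersions([2]int{37, 91}, [2]int{37, 91})) // 0 <nil>
        fmt.Println(compareVersions([2]int{38, 92}, [2]int{37, 91})) // 1 <nil>
        fmt.Println(compareVersions([2]int{37, 91}, [2]int{38, 92})) // 2 <nil>
        fmt.Println(compareVersions([2]int{38, 90}, [2]int{37, 91})) // -1 error
    }
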
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/cluster/open.go | 31 ++-----------------------------
 lxd/util/version.go    | 31 +++++++++++++++++++++++++++++++
 2 files changed, 33 insertions(+), 29 deletions(-)
 create mode 100644 lxd/util/version.go

diff --git a/lxd/db/cluster/open.go b/lxd/db/cluster/open.go
index f12b024e0..09a052cd7 100644
--- a/lxd/db/cluster/open.go
+++ b/lxd/db/cluster/open.go
@@ -8,6 +8,7 @@ import (
 	"github.com/CanonicalLtd/go-grpc-sql"
 	"github.com/lxc/lxd/lxd/db/query"
 	"github.com/lxc/lxd/lxd/db/schema"
+	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared/version"
 	"github.com/pkg/errors"
 )
@@ -145,7 +146,7 @@ func checkClusterIsUpgradable(tx *sql.Tx, target [2]int) error {
 	}
 
 	for _, version := range versions {
-		n, err := compareVersions(target, version)
+		n, err := util.CompareVersions(target, version)
 		if err != nil {
 			return err
 		}
@@ -173,32 +174,4 @@ func checkClusterIsUpgradable(tx *sql.Tx, target [2]int) error {
 	return nil
 }
 
-// Compare two nodes versions.
-//
-// A version consists of the version the node's schema and the number of API
-// extensions it supports.
-//
-// Return 0 if they equal, 1 if the first version is greater than the second
-// and 2 if the second is greater than the first.
-//
-// Return an error if inconsistent versions are detected, for example the first
-// node's schema is greater than the second's, but the number of extensions is
-// smaller.
-func compareVersions(version1, version2 [2]int) (int, error) {
-	schema1, extensions1 := version1[0], version1[1]
-	schema2, extensions2 := version2[0], version2[1]
-
-	if schema1 == schema2 && extensions1 == extensions2 {
-		return 0, nil
-	}
-	if schema1 >= schema2 && extensions1 >= extensions2 {
-		return 1, nil
-	}
-	if schema1 <= schema2 && extensions1 <= extensions2 {
-		return 2, nil
-	}
-
-	return -1, fmt.Errorf("nodes have inconsistent versions")
-}
-
 var errSomeNodesAreBehind = fmt.Errorf("some nodes are behind this node's version")
diff --git a/lxd/util/version.go b/lxd/util/version.go
new file mode 100644
index 000000000..a15a0308c
--- /dev/null
+++ b/lxd/util/version.go
@@ -0,0 +1,31 @@
+package util
+
+import "fmt"
+
+// CompareVersions compares the versions of two LXD nodes.
+//
+// A version consists of the version of the node's schema and the number of API
+// extensions it supports.
+//
+// Return 0 if they are equal, 1 if the first version is greater than the second
+// and 2 if the second is greater than the first.
+//
+// Return an error if inconsistent versions are detected, for example the first
+// node's schema is greater than the second's, but the number of extensions is
+// smaller.
+func CompareVersions(version1, version2 [2]int) (int, error) {
+	schema1, extensions1 := version1[0], version1[1]
+	schema2, extensions2 := version2[0], version2[1]
+
+	if schema1 == schema2 && extensions1 == extensions2 {
+		return 0, nil
+	}
+	if schema1 >= schema2 && extensions1 >= extensions2 {
+		return 1, nil
+	}
+	if schema1 <= schema2 && extensions1 <= extensions2 {
+		return 2, nil
+	}
+
+	return -1, fmt.Errorf("nodes have inconsistent versions")
+}

From 45b257ba4cf1efa6c9d2651dd779b697f3d979b2 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 15 Jan 2018 15:55:32 +0000
Subject: [PATCH 168/227] Add db.Node.Version() returning schema level and API
 extensions

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/node.go      | 6 ++++++
 lxd/db/node_test.go | 1 +
 2 files changed, 7 insertions(+)

diff --git a/lxd/db/node.go b/lxd/db/node.go
index d49cc4505..8cd0bbcc1 100644
--- a/lxd/db/node.go
+++ b/lxd/db/node.go
@@ -28,6 +28,12 @@ func (n NodeInfo) IsOffline(threshold time.Duration) bool {
 	return nodeIsOffline(threshold, n.Heartbeat)
 }
 
+// Version returns the node's version, composed of its schema level and
+// number of API extensions.
+func (n NodeInfo) Version() [2]int {
+	return [2]int{n.Schema, n.APIExtensions}
+}
+
 // NodeByAddress returns the node with the given network address.
 func (c *ClusterTx) NodeByAddress(address string) (NodeInfo, error) {
 	null := NodeInfo{}
diff --git a/lxd/db/node_test.go b/lxd/db/node_test.go
index 33e152f20..619c2ed73 100644
--- a/lxd/db/node_test.go
+++ b/lxd/db/node_test.go
@@ -30,6 +30,7 @@ func TestNodeAdd(t *testing.T) {
 	assert.Equal(t, "1.2.3.4:666", node.Address)
 	assert.Equal(t, cluster.SchemaVersion, node.Schema)
 	assert.Equal(t, len(version.APIExtensions), node.APIExtensions)
+	assert.Equal(t, [2]int{cluster.SchemaVersion, len(version.APIExtensions)}, node.Version())
 	assert.False(t, node.IsOffline(20*time.Second))
 
 	node, err = tx.NodeByName("buzz")

From 6e063158a95645f6bb2dda39e504b0f3a463c6be Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 15 Jan 2018 16:54:13 +0000
Subject: [PATCH 169/227] Add 'clustering' to API extensions list

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 shared/version/api.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/shared/version/api.go b/shared/version/api.go
index feb31aab8..dc15c7e01 100644
--- a/shared/version/api.go
+++ b/shared/version/api.go
@@ -87,4 +87,5 @@ var APIExtensions = []string{
 	"maas_network",
 	"devlxd_events",
 	"proxy",
+	"clustering",
 }

From 85c169e428b76f897533ebe7e7043f97a9110391 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 16 Jan 2018 10:50:55 +0000
Subject: [PATCH 170/227] Add APIExtensionsCount for artificially changing the
 extensions count in tests

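The mechanism can be sketched standalone like this (the extensions list
here is an illustrative subset, not the real one):

    package main

    import (
        "fmt"
        "os"
        "strconv"
    )

    var apiExtensions = []string{"clustering", "proxy"} // illustrative subset

    // extensionsCount mirrors the logic added below: the count can be
    // artificially bumped through an environment variable, letting
    // integration tests simulate a node running a newer LXD version.
    func extensionsCount() int {
        count := len(apiExtensions)
        if bump := os.Getenv("LXD_ARTIFICIALLY_BUMP_API_EXTENSIONS"); bump != "" {
            if n, err := strconv.Atoi(bump); err == nil {
                count += n
            }
        }
        return count
    }

    func main() {
        fmt.Println(extensionsCount()) // 2
        os.Setenv("LXD_ARTIFICIALLY_BUMP_API_EXTENSIONS", "2")
        fmt.Println(extensionsCount()) // 4
    }
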
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 shared/version/api.go | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+)

diff --git a/shared/version/api.go b/shared/version/api.go
index dc15c7e01..d24dedfe0 100644
--- a/shared/version/api.go
+++ b/shared/version/api.go
@@ -1,5 +1,10 @@
 package version
 
+import (
+	"os"
+	"strconv"
+)
+
 // APIVersion contains the API base version. Only bumped for backward incompatible changes.
 var APIVersion = "1.0"
 
@@ -89,3 +94,22 @@ var APIExtensions = []string{
 	"proxy",
 	"clustering",
 }
+
+// APIExtensionsCount returns the number of available API extensions.
+func APIExtensionsCount() int {
+	count := len(APIExtensions)
+
+	// This environment variable is an internal one to force the code
+	// to believe that we have an API extensions count greater than we
+	// actually have. It's used by integration tests to exercise the
+	// cluster upgrade process.
+	artificialBump := os.Getenv("LXD_ARTIFICIALLY_BUMP_API_EXTENSIONS")
+	if artificialBump != "" {
+		n, err := strconv.Atoi(artificialBump)
+		if err == nil {
+			count += n
+		}
+	}
+
+	return count
+}

From 1ced94c710eae8fdf5de5f195953f4601a6fb7b6 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 16 Jan 2018 10:52:45 +0000
Subject: [PATCH 171/227] Plug APIExtensionsCount into code looking for
 extensions count

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_cluster.go     | 2 +-
 lxd/db/cluster/open.go | 2 +-
 lxd/db/node.go         | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index 6b35e6153..fa2980199 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -343,7 +343,7 @@ func clusterNodesPostJoin(d *Daemon, req api.ClusterPost) Response {
 		}
 		info, err := client.AcceptNode(
 			req.TargetPassword, req.Name, address, cluster.SchemaVersion,
-			len(version.APIExtensions), pools, networks)
+			version.APIExtensionsCount(), pools, networks)
 		if err != nil {
 			return errors.Wrap(err, "failed to request to add node")
 		}
diff --git a/lxd/db/cluster/open.go b/lxd/db/cluster/open.go
index 09a052cd7..c37fcd4e7 100644
--- a/lxd/db/cluster/open.go
+++ b/lxd/db/cluster/open.go
@@ -49,7 +49,7 @@ func Open(name string, dialer grpcsql.Dialer) (*sql.DB, error) {
 // till they get upgraded and restarted).
 func EnsureSchema(db *sql.DB, address string) (bool, error) {
 	someNodesAreBehind := false
-	apiExtensions := len(version.APIExtensions)
+	apiExtensions := version.APIExtensionsCount()
 
 	check := func(current int, tx *sql.Tx) error {
 		// If we're bootstrapping a fresh schema, skip any check, since
diff --git a/lxd/db/node.go b/lxd/db/node.go
index 8cd0bbcc1..cfd1a03a1 100644
--- a/lxd/db/node.go
+++ b/lxd/db/node.go
@@ -180,7 +180,7 @@ SELECT id, name, address, description, schema, api_extensions, heartbeat FROM no
 // cluster. It returns the ID of the newly inserted row.
 func (c *ClusterTx) NodeAdd(name string, address string) (int64, error) {
 	columns := []string{"name", "address", "schema", "api_extensions"}
-	values := []interface{}{name, address, cluster.SchemaVersion, len(version.APIExtensions)}
+	values := []interface{}{name, address, cluster.SchemaVersion, version.APIExtensionsCount()}
 	return query.UpsertObject(c.tx, "nodes", columns, values)
 }
 

From 5979c977b891c744f3a6f8db4bb3a36a443d4df8 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 16 Jan 2018 10:56:03 +0000
Subject: [PATCH 172/227] Add NotifyUpgradeCompleted to notify nodes blocked on
 schema upgrade

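The gateway side of this is a simple buffered-channel handoff, sketched
standalone below; because the channel is buffered, senders never block,
and dropping extra notifications is fine since a single wake-up is
enough for the waiting node:

    package main

    import "fmt"

    var upgradeCh = make(chan struct{}, 16)

    // notifyUpgrade mirrors the PATCH handler added to the gateway: send a
    // token if there is room, otherwise drop it (one is already pending).
    func notifyUpgrade() {
        select {
        case upgradeCh <- struct{}{}:
        default:
        }
    }

    // waitUpgrade mirrors Gateway.WaitUpgradeNotification: block until some
    // other node reports that the upgrade has completed.
    func waitUpgrade() {
        <-upgradeCh
    }

    func main() {
        notifyUpgrade()
        notifyUpgrade() // harmless duplicate, dropped by the select
        waitUpgrade()
        fmt.Println("unblocked after upgrade notification")
    }
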
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/gateway.go      | 31 +++++++++++++++++++++++-----
 lxd/cluster/upgrade.go      | 50 +++++++++++++++++++++++++++++++++++++++++++++
 lxd/cluster/upgrade_test.go | 26 +++++++++++++++++++++++
 3 files changed, 102 insertions(+), 5 deletions(-)
 create mode 100644 lxd/cluster/upgrade.go
 create mode 100644 lxd/cluster/upgrade_test.go

diff --git a/lxd/cluster/gateway.go b/lxd/cluster/gateway.go
index d00b08aea..aaa491670 100644
--- a/lxd/cluster/gateway.go
+++ b/lxd/cluster/gateway.go
@@ -43,11 +43,12 @@ func NewGateway(db *db.Node, cert *shared.CertInfo, options ...Option) (*Gateway
 	}
 
 	gateway := &Gateway{
-		db:      db,
-		cert:    cert,
-		options: o,
-		ctx:     ctx,
-		cancel:  cancel,
+		db:        db,
+		cert:      cert,
+		options:   o,
+		ctx:       ctx,
+		cancel:    cancel,
+		upgradeCh: make(chan struct{}, 16),
 	}
 
 	err := gateway.init()
@@ -87,6 +88,10 @@ type Gateway struct {
 	// dialing attempt.
 	ctx    context.Context
 	cancel context.CancelFunc
+
+	// Used to unblock nodes that are waiting for other nodes to upgrade
+	// their version.
+	upgradeCh chan struct{}
 }
 
 // HandlerFuncs returns the HTTP handlers that should be added to the REST API
@@ -129,6 +134,15 @@ func (g *Gateway) HandlerFuncs() map[string]http.HandlerFunc {
 			return
 		}
 
+		// Handle database upgrade notifications.
+		if r.Method == "PATCH" {
+			select {
+			case g.upgradeCh <- struct{}{}:
+			default:
+			}
+			return
+		}
+
 		// Before actually establishing the gRPC SQL connection, our
 		// dialer probes the node to see if it's currently the leader
 		// (otherwise it tries with another node or retry later).
@@ -195,6 +209,13 @@ func (g *Gateway) HandlerFuncs() map[string]http.HandlerFunc {
 	}
 }
 
+// WaitUpgradeNotification waits for a notification from another node that all
+// nodes in the cluster should now have been upgraded and have matching schema
+// and API versions.
+func (g *Gateway) WaitUpgradeNotification() {
+	<-g.upgradeCh
+}
+
 // Dialer returns a gRPC dial function that can be used to connect to one of
 // the dqlite nodes via gRPC.
 func (g *Gateway) Dialer() grpcsql.Dialer {
diff --git a/lxd/cluster/upgrade.go b/lxd/cluster/upgrade.go
new file mode 100644
index 000000000..c9f12d6a5
--- /dev/null
+++ b/lxd/cluster/upgrade.go
@@ -0,0 +1,50 @@
+package cluster
+
+import (
+	"fmt"
+	"net/http"
+
+	lxd "github.com/lxc/lxd/client"
+	"github.com/lxc/lxd/lxd/state"
+	"github.com/lxc/lxd/shared"
+	"github.com/pkg/errors"
+)
+
+// NotifyUpgradeCompleted sends a notification to all other nodes in the
+// cluster that any possible pending database update has been applied, and any
+// nodes which were waiting for this node to be upgraded should re-check if it's
+// okay to move forward.
+func NotifyUpgradeCompleted(state *state.State, cert *shared.CertInfo) error {
+	notifier, err := NewNotifier(state, cert, NotifyAll)
+	if err != nil {
+		return err
+	}
+	return notifier(func(client lxd.ContainerServer) error {
+		host, err := client.GetServerHost()
+		if err != nil {
+			return errors.Wrap(err, "failed to get connection info")
+		}
+
+		url := fmt.Sprintf("%s%s", host, grpcEndpoint)
+		request, err := http.NewRequest("PATCH", url, nil)
+		if err != nil {
+			return errors.Wrap(err, "failed to create database notify upgrade request")
+		}
+
+		httpClient, err := client.GetHTTPClient()
+		if err != nil {
+			return errors.Wrap(err, "failed to get HTTP client")
+		}
+
+		response, err := httpClient.Do(request)
+		if err != nil {
+			return errors.Wrap(err, "failed to notify node about completed upgrade")
+		}
+
+		if response.StatusCode != http.StatusOK {
+			return fmt.Errorf("database upgrade notification failed: %s", response.Status)
+		}
+
+		return nil
+	})
+}
diff --git a/lxd/cluster/upgrade_test.go b/lxd/cluster/upgrade_test.go
new file mode 100644
index 000000000..63cc3e06e
--- /dev/null
+++ b/lxd/cluster/upgrade_test.go
@@ -0,0 +1,26 @@
+package cluster_test
+
+import (
+	"testing"
+
+	"github.com/lxc/lxd/lxd/cluster"
+	"github.com/stretchr/testify/require"
+)
+
+// A node can unblock other nodes that were waiting for a cluster upgrade to
+// complete.
+func TestNotifyUpgradeCompleted(t *testing.T) {
+	f := heartbeatFixture{t: t}
+	defer f.Cleanup()
+
+	gateway0 := f.Bootstrap()
+	gateway1 := f.Grow()
+
+	state0 := f.State(gateway0)
+
+	cert0 := gateway0.Cert()
+	err := cluster.NotifyUpgradeCompleted(state0, cert0)
+	require.NoError(t, err)
+
+	gateway1.WaitUpgradeNotification()
+}

From 42a19d43e132cfe061eb93cce796796465727a90 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 16 Jan 2018 10:59:14 +0000
Subject: [PATCH 173/227] Make cluster.List detect upgrading nodes and set the
 BLOCKED status

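Roughly, the state of an online node is derived by comparing its version
against the lowest version present in the cluster; here is a simplified
standalone sketch that ignores the OFFLINE and BROKEN cases handled in
the diff:

    package main

    import "fmt"

    // stateFor is a simplified version of the logic in cluster.List: an
    // online node whose version is ahead of the cluster's lowest version has
    // already been upgraded and must wait for the others, so it is BLOCKED.
    func stateFor(node, lowest [2]int) string {
        if node[0] > lowest[0] || node[1] > lowest[1] {
            return "BLOCKED" // waiting for other nodes to be upgraded
        }
        return "ONLINE" // fully operational
    }

    func main() {
        lowest := [2]int{37, 91}
        fmt.Println(stateFor([2]int{37, 91}, lowest)) // ONLINE
        fmt.Println(stateFor([2]int{38, 92}, lowest)) // BLOCKED
    }
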
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_cluster.go             | 53 ++++-------------------------------
 lxd/cluster/membership.go      | 63 ++++++++++++++++++++++++++++++++++--------
 lxd/cluster/membership_test.go |  9 +++---
 shared/api/cluster.go          |  1 +
 4 files changed, 64 insertions(+), 62 deletions(-)

diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index fa2980199..fe3b8af66 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -380,23 +380,11 @@ func clusterNodesPostJoin(d *Daemon, req api.ClusterPost) Response {
 }
 
 func clusterNodesGet(d *Daemon, r *http.Request) Response {
-	dbNodes, flags, offlineThreshold, err := cluster.List(d.State())
+	nodes, err := cluster.List(d.State())
 	if err != nil {
 		return SmartError(err)
 	}
 
-	nodes := make([]api.Node, len(dbNodes))
-	for i, dbNode := range dbNodes {
-		nodes[i].Name = dbNode.Name
-		nodes[i].URL = fmt.Sprintf("https://%s", dbNode.Address)
-		nodes[i].Database = flags[dbNode.ID]
-		if dbNode.IsOffline(offlineThreshold) {
-			nodes[i].State = "OFFLINE"
-		} else {
-			nodes[i].State = "ONLINE"
-		}
-	}
-
 	return SyncResponse(true, nodes)
 }
 
@@ -409,48 +397,19 @@ var clusterNodeCmd = Command{
 
 func clusterNodeGet(d *Daemon, r *http.Request) Response {
 	name := mux.Vars(r)["name"]
-	node := api.Node{}
-	node.Name = name
-	address := ""
-	err := d.cluster.Transaction(func(tx *db.ClusterTx) error {
-		offlineThreshold, err := tx.NodeOfflineThreshold()
-		if err != nil {
-			return err
-		}
 
-		dbNode, err := tx.NodeByName(name)
-		if err != nil {
-			return err
-		}
-		address = dbNode.Address
-		node.URL = fmt.Sprintf("https://%s", dbNode.Address)
-		if dbNode.IsOffline(offlineThreshold) {
-			node.State = "OFFLINE"
-		} else {
-			node.State = "ONLINE"
-		}
-		return nil
-	})
+	nodes, err := cluster.List(d.State())
 	if err != nil {
 		return SmartError(err)
 	}
 
-	// Figure out if this node is currently a database node.
-	err = d.db.Transaction(func(tx *db.NodeTx) error {
-		addresses, err := tx.RaftNodeAddresses()
-		if err != nil {
-			return err
+	for _, node := range nodes {
+		if node.Name == name {
+			return SyncResponse(true, node)
 		}
-		if shared.StringInSlice(address, addresses) {
-			node.Database = true
-		}
-		return nil
-	})
-	if err != nil {
-		return SmartError(err)
 	}
 
-	return SyncResponse(true, node)
+	return NotFound
 }
 
 func clusterNodePost(d *Daemon, r *http.Request) Response {
diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index b5cb880a7..fec67dda6 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -14,7 +14,9 @@ import (
 	"github.com/lxc/lxd/lxd/db/cluster"
 	"github.com/lxc/lxd/lxd/node"
 	"github.com/lxc/lxd/lxd/state"
+	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/api"
 	"github.com/lxc/lxd/shared/log15"
 	"github.com/lxc/lxd/shared/logger"
 	"github.com/pkg/errors"
@@ -461,11 +463,7 @@ func Leave(state *state.State, gateway *Gateway, name string, force bool) (strin
 }
 
 // List the nodes of the cluster.
-//
-// Upon success return a list of the current nodes, a map that for each ID
-// tells if the node is part of the database cluster or not, and the configured
-// offline threshold.
-func List(state *state.State) ([]db.NodeInfo, map[int64]bool, time.Duration, error) {
+func List(state *state.State) ([]api.Node, error) {
 	addresses := []string{} // Addresses of database nodes
 	err := state.Node.Transaction(func(tx *db.NodeTx) error {
 		nodes, err := tx.RaftNodes()
@@ -478,7 +476,7 @@ func List(state *state.State) ([]db.NodeInfo, map[int64]bool, time.Duration, err
 		return nil
 	})
 	if err != nil {
-		return nil, nil, -1, err
+		return nil, err
 	}
 
 	var nodes []db.NodeInfo
@@ -497,14 +495,57 @@ func List(state *state.State) ([]db.NodeInfo, map[int64]bool, time.Duration, err
 		return nil
 	})
 	if err != nil {
-		return nil, nil, -1, err
+		return nil, err
 	}
-	flags := make(map[int64]bool) // Whether a node is a database node
-	for _, node := range nodes {
-		flags[node.ID] = shared.StringInSlice(node.Address, addresses)
+
+	result := make([]api.Node, len(nodes))
+	now := time.Now()
+	version := nodes[0].Version()
+	for i, node := range nodes {
+		result[i].Name = node.Name
+		result[i].URL = fmt.Sprintf("https://%s", node.Address)
+		result[i].Database = shared.StringInSlice(node.Address, addresses)
+		if node.IsOffline(offlineThreshold) {
+			result[i].State = "OFFLINE"
+			result[i].Message = fmt.Sprintf(
+				"no heartbeat since %s", now.Sub(node.Heartbeat))
+		} else {
+			result[i].State = "ONLINE"
+			result[i].Message = "fully operational"
+		}
+
+		n, err := util.CompareVersions(version, node.Version())
+		if err != nil {
+			result[i].State = "BROKEN"
+			result[i].Message = "inconsistent version"
+			continue
+		}
+
+		if n == 1 {
+			// This node's version is lower, which means that
+			// the previous nodes in the loop have been
+			// upgraded; keep tracking the lowest version.
+			version = node.Version()
+		}
+	}
+
+	// Update the state of online nodes that have been upgraded and whose
+	// schema is more recent than the rest of the nodes.
+	for i, node := range nodes {
+		if result[i].State != "ONLINE" {
+			continue
+		}
+		n, err := util.CompareVersions(version, node.Version())
+		if err != nil {
+			continue
+		}
+		if n == 2 {
+			result[i].State = "BLOCKED"
+			result[i].Message = "waiting for other nodes to be upgraded"
+		}
 	}
 
-	return nodes, flags, offlineThreshold, nil
+	return result, nil
 }
 
 // Count is a convenience for checking the current number of nodes in the
diff --git a/lxd/cluster/membership_test.go b/lxd/cluster/membership_test.go
index 9365bdccf..4e4bf362f 100644
--- a/lxd/cluster/membership_test.go
+++ b/lxd/cluster/membership_test.go
@@ -308,12 +308,13 @@ func TestJoin(t *testing.T) {
 	assert.Equal(t, address, raftNodes[1].Address)
 
 	// The List function returns all nodes in the cluster.
-	nodes, flags, offlineThreshold, err := cluster.List(state)
+	nodes, err := cluster.List(state)
 	require.NoError(t, err)
 	assert.Len(t, nodes, 2)
-	assert.True(t, flags[1])
-	assert.True(t, flags[2])
-	assert.Equal(t, float64(20), offlineThreshold.Seconds())
+	assert.Equal(t, "ONLINE", nodes[0].State)
+	assert.Equal(t, "ONLINE", nodes[1].State)
+	assert.True(t, nodes[0].Database)
+	assert.True(t, nodes[1].Database)
 
 	// The Count function returns the number of nodes.
 	count, err := cluster.Count(state)
diff --git a/shared/api/cluster.go b/shared/api/cluster.go
index e68ce5551..333a4adcd 100644
--- a/shared/api/cluster.go
+++ b/shared/api/cluster.go
@@ -54,4 +54,5 @@ type Node struct {
 	URL      string `json:"url" yaml:"url"`
 	Database bool   `json:"database" yaml:"database"`
 	State    string `json:"state" yaml:"state"`
+	Message  string `json:"message" yaml:"message"`
 }

From 58a1930203ca2e33ec07f3794db922666b843464 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 16 Jan 2018 11:00:45 +0000
Subject: [PATCH 174/227] Add MESSAGE column to lxc cluster list with extra
 state information

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxc/cluster.go | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/lxc/cluster.go b/lxc/cluster.go
index 02e5d833c..e21c4f1e6 100644
--- a/lxc/cluster.go
+++ b/lxc/cluster.go
@@ -170,7 +170,8 @@ func (c *clusterCmd) doClusterList(conf *config.Config, args []string) error {
 		if node.Database {
 			database = "YES"
 		}
-		data = append(data, []string{node.Name, node.URL, database, node.State})
+		line := []string{node.Name, node.URL, database, node.State, node.Message}
+		data = append(data, line)
 	}
 
 	table := tablewriter.NewWriter(os.Stdout)
@@ -181,7 +182,9 @@ func (c *clusterCmd) doClusterList(conf *config.Config, args []string) error {
 		i18n.G("NAME"),
 		i18n.G("URL"),
 		i18n.G("DATABASE"),
-		i18n.G("STATE")})
+		i18n.G("STATE"),
+		i18n.G("MESSAGE"),
+	})
 	sort.Sort(byName(data))
 	table.AppendBulk(data)
 	table.Render()

From 15a2ab39719eb2c702c12242515ded4b0f2490f1 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 16 Jan 2018 11:04:01 +0000
Subject: [PATCH 175/227] Add "wait" param to respawn_lxd to make blocking on
 waitready optional

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 test/includes/lxd.sh             | 15 ++++++++++++---
 test/suites/basic.sh             |  2 +-
 test/suites/image_auto_update.sh |  2 +-
 3 files changed, 14 insertions(+), 5 deletions(-)

diff --git a/test/includes/lxd.sh b/test/includes/lxd.sh
index 8d9e75b31..ab34b66f1 100644
--- a/test/includes/lxd.sh
+++ b/test/includes/lxd.sh
@@ -98,6 +98,9 @@ respawn_lxd() {
     lxddir=${1}
     shift
 
+    wait=${1}
+    shift
+
     # Link to local sqlite with replication patch for dqlite
     sqlite="$(pwd)/../lxd/sqlite"
     if [ -e "/lxc-ci/build/cache/sqlite" ]; then
@@ -106,13 +109,19 @@ respawn_lxd() {
 
     echo "==> Spawning lxd in ${lxddir}"
     # shellcheck disable=SC2086
-    LD_LIBRARY_PATH="${sqlite}/.libs" LXD_DIR="${lxddir}" lxd --logfile "${lxddir}/lxd.log" "${DEBUG-}" "$@" 2>&1 &
+    if [ "${LXD_NETNS}" = "" ]; then
+	LD_LIBRARY_PATH="${sqlite}/.libs" LXD_DIR="${lxddir}" lxd --logfile "${lxddir}/lxd.log" "${DEBUG-}" "$@" 2>&1 &
+    else
+	pid="$(lxc-info -n "${LXD_NETNS}" -p | cut -f 2 -d : | tr -d " ")"
+	LD_LIBRARY_PATH="${sqlite}/.libs" LXD_DIR="${lxddir}" nsenter --all --target="${pid}" lxd --logfile "${lxddir}/lxd.log" "${DEBUG-}" "$@" 2>&1 &
+    fi
     LXD_PID=$!
     echo "${LXD_PID}" > "${lxddir}/lxd.pid"
     echo "==> Spawned LXD (PID is ${LXD_PID})"
 
-    echo "==> Confirming lxd is responsive"
-    LXD_DIR="${lxddir}" lxd waitready --timeout=300
+    if [ "${wait}" = true ]; then
+	echo "==> Confirming lxd is responsive"
+	LXD_DIR="${lxddir}" lxd waitready --timeout=300
+    fi
 }
 
 kill_lxd() {
diff --git a/test/suites/basic.sh b/test/suites/basic.sh
index dfb3ad1af..523b0a09f 100644
--- a/test/suites/basic.sh
+++ b/test/suites/basic.sh
@@ -278,7 +278,7 @@ test_basic_usage() {
     LD_LIBRARY_PATH="${sqlite}/.libs" lxd activateifneeded --debug 2>&1 | grep -q "Daemon has auto-started containers, activating..."
 
     # shellcheck disable=SC2031
-    respawn_lxd "${LXD_DIR}"
+    respawn_lxd "${LXD_DIR}" true
 
     lxc list --force-local autostart | grep -q RUNNING
 
diff --git a/test/suites/image_auto_update.sh b/test/suites/image_auto_update.sh
index a786a0e89..d552de52f 100644
--- a/test/suites/image_auto_update.sh
+++ b/test/suites/image_auto_update.sh
@@ -32,7 +32,7 @@ test_image_auto_update() {
   # Restart the server to force an image refresh immediately
   # shellcheck disable=2153
   shutdown_lxd "${LXD_DIR}"
-  respawn_lxd "${LXD_DIR}"
+  respawn_lxd "${LXD_DIR}" true
 
   # Check that the first image got deleted from the local storage
   #

From 6f739e52513e540e9d3563666aa5339a4062bc4c Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 16 Jan 2018 11:05:54 +0000
Subject: [PATCH 176/227] Handle cluster upgrades by switching daemons to the
 BLOCKED state

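The startup flow added here boils down to an open/wait/retry loop,
sketched standalone below (illustrative names; the real code also keeps
the heartbeat task running while it waits, in case this node is the raft
leader):

    package main

    import (
        "errors"
        "fmt"
    )

    var errSomeNodesAreBehind = errors.New("some nodes are behind this node's version")

    // openUntilUpgraded retries opening the cluster database: when other
    // nodes are behind, it blocks on an upgrade notification and tries again.
    func openUntilUpgraded(open func() error, waitNotification func()) error {
        for {
            err := open()
            if err == nil {
                return nil
            }
            if errors.Is(err, errSomeNodesAreBehind) {
                waitNotification() // unblocked by NotifyUpgradeCompleted
                continue
            }
            return err
        }
    }

    func main() {
        attempts := 0
        open := func() error {
            attempts++
            if attempts == 1 {
                return errSomeNodesAreBehind
            }
            return nil
        }
        err := openUntilUpgraded(open, func() {})
        fmt.Println(err, attempts) // <nil> 2
    }
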
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/daemon.go             | 32 +++++++++++++++--
 lxd/db/db.go              | 12 +++++--
 test/main.sh              |  1 +
 test/suites/clustering.sh | 88 +++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 129 insertions(+), 4 deletions(-)

diff --git a/lxd/daemon.go b/lxd/daemon.go
index 8b4ec5312..3d8059640 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -450,10 +450,38 @@ func (d *Daemon) init() error {
 	}
 
 	/* Open the cluster database */
-	d.cluster, err = db.OpenCluster("db.bin", d.gateway.Dialer(), address)
-	if err != nil {
+	for {
+		d.cluster, err = db.OpenCluster("db.bin", d.gateway.Dialer(), address)
+		if err == nil {
+			break
+		}
+		// If some other nodes have schema or API versions less recent
+		// than this node's, we block until we receive a notification
+		// from the last node being upgraded that everything should
+		// now be fine, and then retry.
+		if err == db.ErrSomeNodesAreBehind {
+			logger.Info("Waiting for other cluster nodes to upgrade their versions")
+
+			// The only thing we want to still do on this node is
+			// to run the heartbeat task, in case we are the raft
+			// leader.
+			stop, _ := task.Start(cluster.Heartbeat(d.gateway, d.cluster))
+			d.gateway.WaitUpgradeNotification()
+			stop(time.Second)
+
+			d.cluster.Close()
+
+			continue
+		}
 		return errors.Wrap(err, "failed to open cluster database")
 	}
+	err = cluster.NotifyUpgradeCompleted(d.State(), certInfo)
+	if err != nil {
+		// Ignore the error, since it's not fatal for this particular
+		// node. In most cases it just means that some nodes are
+		// offline.
+		logger.Debugf("Could not notify all nodes of database upgrade: %v", err)
+	}
 
 	/* Migrate the node local data to the cluster database, if needed */
 	if dump != nil {
diff --git a/lxd/db/db.go b/lxd/db/db.go
index 8746a0b0a..aea4d4de9 100644
--- a/lxd/db/db.go
+++ b/lxd/db/db.go
@@ -184,7 +184,7 @@ func OpenCluster(name string, dialer grpcsql.Dialer, address string) (*Cluster,
 		}
 	}
 
-	_, err = cluster.EnsureSchema(db, address)
+	nodesVersionsMatch, err := cluster.EnsureSchema(db, address)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to ensure schema")
 	}
@@ -217,9 +217,17 @@ func OpenCluster(name string, dialer grpcsql.Dialer, address string) (*Cluster,
 		return nil, err
 	}
 
-	return cluster, nil
+	if !nodesVersionsMatch {
+		err = ErrSomeNodesAreBehind
+	}
+
+	return cluster, err
 }
 
+// ErrSomeNodesAreBehind is returned by OpenCluster if some of the nodes in the
+// cluster have a schema or API version that is less recent than this node's.
+var ErrSomeNodesAreBehind = fmt.Errorf("some nodes are behind this node's version")
+
 // ForLocalInspection is an aid for the hack in initializeDbObject, which
 // sets the db-related Daemon attributes upfront, to be backward compatible
 // with the legacy patches that need to interact with the database.
diff --git a/test/main.sh b/test/main.sh
index 246d2229e..de9f89284 100755
--- a/test/main.sh
+++ b/test/main.sh
@@ -198,6 +198,7 @@ run_test test_clustering_membership "clustering membership"
 run_test test_clustering_containers "clustering containers"
 run_test test_clustering_storage "clustering storage"
 run_test test_clustering_network "clustering network"
+run_test test_clustering_upgrade "clustering upgrade"
 
 # shellcheck disable=SC2034
 TEST_RESULT=success
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index f8eaa1976..6651544b9 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -344,3 +344,91 @@ test_clustering_network() {
   teardown_clustering_netns
   teardown_clustering_bridge
 }
+
+test_clustering_upgrade() {
+  setup_clustering_bridge
+  prefix="lxd$$"
+  bridge="${prefix}"
+
+  # First, test the upgrade with a 2-node cluster
+  setup_clustering_netns 1
+  LXD_ONE_DIR=$(mktemp -d -p "${TEST_DIR}" XXX)
+  chmod +x "${LXD_ONE_DIR}"
+  ns1="${prefix}1"
+  spawn_lxd_and_bootstrap_cluster "${ns1}" "${bridge}" "${LXD_ONE_DIR}"
+
+  # Add a newline at the end of each line. YAML has weird rules...
+  cert=$(sed ':a;N;$!ba;s/\n/\n\n/g' "${LXD_ONE_DIR}/server.crt")
+
+  # Spawn a second node
+  setup_clustering_netns 2
+  LXD_TWO_DIR=$(mktemp -d -p "${TEST_DIR}" XXX)
+  chmod +x "${LXD_TWO_DIR}"
+  ns2="${prefix}2"
+  spawn_lxd_and_join_cluster "${ns2}" "${bridge}" "${cert}" 2 1 "${LXD_TWO_DIR}"
+
+  # Respawn the second node, making it believe it has a higher
+  # version than it actually has.
+  export LXD_ARTIFICIALLY_BUMP_API_EXTENSIONS=1
+  shutdown_lxd "${LXD_TWO_DIR}"
+  LXD_NETNS="${ns2}" respawn_lxd "${LXD_TWO_DIR}" false
+
+  # The second daemon is blocked waiting for the other to be upgraded
+  ! LXD_DIR="${LXD_TWO_DIR}" lxd waitready --timeout=5
+
+  LXD_DIR="${LXD_ONE_DIR}" lxc cluster show node1 | grep -q "message: fully operational"
+  LXD_DIR="${LXD_ONE_DIR}" lxc cluster show node2 | grep -q "message: waiting for other nodes to be upgraded"
+
+  # Respawn the first node, so it matches the version the second node
+  # believes to have.
+  shutdown_lxd "${LXD_ONE_DIR}"
+  LXD_NETNS="${ns1}" respawn_lxd "${LXD_ONE_DIR}" true
+
+  # The second daemon has now unblocked
+  LXD_DIR="${LXD_TWO_DIR}" lxd waitready --timeout=30
+
+  # The cluster is again operational
+  ! LXD_DIR="${LXD_ONE_DIR}" lxc cluster list | grep -q "OFFLINE"
+
+  # Now spawn a third node and test the upgrade with a 3-node cluster.
+  setup_clustering_netns 3
+  LXD_THREE_DIR=$(mktemp -d -p "${TEST_DIR}" XXX)
+  chmod +x "${LXD_THREE_DIR}"
+  ns3="${prefix}3"
+  spawn_lxd_and_join_cluster "${ns3}" "${bridge}" "${cert}" 3 1 "${LXD_THREE_DIR}"
+
+  # Respawn the second node, making it believe it has a higher
+  # version than it actually has.
+  export LXD_ARTIFICIALLY_BUMP_API_EXTENSIONS=2
+  shutdown_lxd "${LXD_TWO_DIR}"
+  LXD_NETNS="${ns2}" respawn_lxd "${LXD_TWO_DIR}" false
+
+  # The second daemon is blocked waiting for the other two to be
+  # upgraded
+  ! LXD_DIR="${LXD_TWO_DIR}" lxd waitready --timeout=5
+
+  LXD_DIR="${LXD_ONE_DIR}" lxc cluster show node1 | grep -q "message: fully operational"
+  LXD_DIR="${LXD_ONE_DIR}" lxc cluster show node2 | grep -q "message: waiting for other nodes to be upgraded"
+  LXD_DIR="${LXD_THREE_DIR}" lxc cluster show node3 | grep -q "message: fully operational"
+
+  # Respawn the first node and third node, so they match the version
+  # the second node believes to have.
+  shutdown_lxd "${LXD_ONE_DIR}"
+  LXD_NETNS="${ns1}" respawn_lxd "${LXD_ONE_DIR}" false
+  shutdown_lxd "${LXD_THREE_DIR}"
+  LXD_NETNS="${ns3}" respawn_lxd "${LXD_THREE_DIR}" true
+
+  # The cluster is again operational
+  ! LXD_DIR="${LXD_ONE_DIR}" lxc cluster list | grep -q "OFFLINE"
+
+  LXD_DIR="${LXD_THREE_DIR}" lxd shutdown
+  LXD_DIR="${LXD_TWO_DIR}" lxd shutdown
+  LXD_DIR="${LXD_ONE_DIR}" lxd shutdown
+  sleep 2
+  rm -f "${LXD_THREE_DIR}/unix.socket"
+  rm -f "${LXD_TWO_DIR}/unix.socket"
+  rm -f "${LXD_ONE_DIR}/unix.socket"
+
+  teardown_clustering_netns
+  teardown_clustering_bridge
+}

From fa8727ab31b025ccd8a59ccb275b65d93567f346 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 16 Jan 2018 16:45:12 +0000
Subject: [PATCH 177/227] Add cmd.AskPasswordOnce for entering a password
 without confirmation

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 shared/cmd/context.go      | 10 ++++++++++
 shared/cmd/context_test.go | 14 ++++++++++++++
 2 files changed, 24 insertions(+)

diff --git a/shared/cmd/context.go b/shared/cmd/context.go
index f2659956d..e8d7d6104 100644
--- a/shared/cmd/context.go
+++ b/shared/cmd/context.go
@@ -138,6 +138,16 @@ func (c *Context) AskPassword(question string, reader func(int) ([]byte, error))
 	}
 }
 
+// AskPasswordOnce asks the user to enter a password.
+//
+// It's the same as AskPassword, but it won't ask to enter it again.
+func (c *Context) AskPasswordOnce(question string, reader func(int) ([]byte, error)) string {
+	fmt.Fprintf(c.stdout, question)
+	pwd, _ := reader(0)
+	fmt.Fprintf(c.stdout, "\n")
+	return string(pwd)
+}
+
 // InputYAML treats stdin as YAML content and returns the unmarshalled
 // structure
 func (c *Context) InputYAML(out interface{}) error {
diff --git a/shared/cmd/context_test.go b/shared/cmd/context_test.go
index 7f73e57fd..99cad710c 100644
--- a/shared/cmd/context_test.go
+++ b/shared/cmd/context_test.go
@@ -166,6 +166,20 @@ func TestAskPassword(t *testing.T) {
 	}
 }
 
+// AskPasswordOnce returns the password entered once by the user.
+func TestAskPasswordOnce(t *testing.T) {
+	streams := cmd.NewMemoryStreams("")
+	context := cmd.NewMemoryContext(streams)
+
+	reader := func(int) ([]byte, error) {
+		return []byte("pwd"), nil
+	}
+
+	result := context.AskPasswordOnce("Pass?", reader)
+
+	assert.Equal(t, "pwd", result, "Unexpected answer result")
+}
+
 // InputYAML parses the YAML content passed via stdin.
 func TestInputYAML(t *testing.T) {
 	streams := cmd.NewMemoryStreams("field: foo")

From a15dceea480ef65140e0098dc7f71a76535907c1 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 16 Jan 2018 16:51:37 +0000
Subject: [PATCH 178/227] Ask trust password only once when joining a cluster

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/main_init.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lxd/main_init.go b/lxd/main_init.go
index a39d03e2d..5f9d866ed 100644
--- a/lxd/main_init.go
+++ b/lxd/main_init.go
@@ -814,7 +814,7 @@ func (cmd *CmdInit) askClustering() (*cmdInitClusteringParams, error) {
 join:
 	targetAddress := cmd.Context.AskString("IP address or FQDN of an existing cluster node: ", "", nil)
 	params.TargetAddress = util.CanonicalNetworkAddress(targetAddress)
-	params.TargetPassword = cmd.Context.AskPassword(
+	params.TargetPassword = cmd.Context.AskPasswordOnce(
 		"Trust password for the existing cluster: ", cmd.PasswordReader)
 
 	url := fmt.Sprintf("https://%s", params.TargetAddress)

From a127f57bf286e3c3844bd51df2ea57b3c35b9bcf Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 17 Jan 2018 08:38:37 +0000
Subject: [PATCH 179/227] Improve logging when failing to add an operation to
 the db

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/operations.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lxd/operations.go b/lxd/operations.go
index 65a0fef18..e1c68bd5f 100644
--- a/lxd/operations.go
+++ b/lxd/operations.go
@@ -435,7 +435,7 @@ func operationCreate(cluster *db.Cluster, opClass operationClass, opResources ma
 		return err
 	})
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to add operation to database")
+		return nil, errors.Wrapf(err, "failed to add operation %s to database", op.id)
 	}
 
 	logger.Debugf("New %s operation: %s", op.class.String(), op.id)

From 76692c92cad710aaaa8d11cbb80c29a3b21564cd Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 17 Jan 2018 08:50:44 +0000
Subject: [PATCH 180/227] Setup MAAS again after a node joins the cluster

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_cluster.go | 27 ++++++++++++++++++++++++++-
 1 file changed, 26 insertions(+), 1 deletion(-)

diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index fe3b8af66..266e88e33 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -366,7 +366,32 @@ func clusterNodesPostJoin(d *Daemon, req api.ClusterPost) Response {
 			nodes[i].ID = node.ID
 			nodes[i].Address = node.Address
 		}
-		return cluster.Join(d.State(), d.gateway, cert, req.Name, nodes)
+
+		err = cluster.Join(d.State(), d.gateway, cert, req.Name, nodes)
+		if err != nil {
+			return err
+		}
+
+		// FIXME: special case handling MAAS connection if the config
+		// in the cluster is different than what we had locally before
+		// joining. Ideally this should be something transparent or
+		// more generic, perhaps triggering some parts of Daemon.Init.
+		var config *cluster.Config
+		err = d.cluster.Transaction(func(tx *db.ClusterTx) error {
+			var err error
+			config, err = cluster.ConfigLoad(tx)
+			return err
+		})
+		if err != nil {
+			return err
+		}
+		url, key, machine := config.MAASController()
+		err = d.setupMAASController(url, key, machine)
+		if err != nil {
+			return err
+		}
+		return nil
+
 	}
 	resources := map[string][]string{}
 	resources["cluster"] = []string{}

From acc86b475d7f5b6f21ab9991d2135c012958d6ad Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 17 Jan 2018 12:41:33 +0000
Subject: [PATCH 181/227] Improve error message when storage or network configs
 don't match

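A standalone sketch of the new behaviour (same semantics as the diff,
written a bit more compactly with a set for the deduplication):

    package main

    import (
        "fmt"
        "sort"
        "strings"
    )

    // compareConfigs mirrors util.CompareConfigs after this patch: instead
    // of a bare bool it reports which keys differ, skipping the excluded
    // (node-specific) keys.
    func compareConfigs(config1, config2 map[string]string, exclude []string) error {
        skip := map[string]bool{}
        for _, key := range exclude {
            skip[key] = true
        }
        deltaSet := map[string]bool{}
        for key, value := range config1 {
            if !skip[key] && config2[key] != value {
                deltaSet[key] = true
            }
        }
        for key, value := range config2 {
            if !skip[key] && config1[key] != value {
                deltaSet[key] = true
            }
        }
        if len(deltaSet) == 0 {
            return nil
        }
        delta := []string{}
        for key := range deltaSet {
            delta = append(delta, key)
        }
        sort.Strings(delta)
        return fmt.Errorf("different values for keys: %s", strings.Join(delta, ", "))
    }

    func main() {
        pool1 := map[string]string{"size": "123", "source": "/dev/sda"}
        pool2 := map[string]string{"size": "123", "source": "/dev/sdb"}
        fmt.Println(compareConfigs(pool1, pool2, []string{"source"})) // <nil>
        fmt.Println(compareConfigs(pool1, pool2, nil))                // different values for keys: source
    }
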
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_cluster.go      | 16 ++++++++--------
 lxd/util/config.go      | 43 ++++++++++++++++++++++++++++++++++++++-----
 lxd/util/config_test.go | 41 +++++++++++++++++++++++++++++++++++++++++
 3 files changed, 87 insertions(+), 13 deletions(-)
 create mode 100644 lxd/util/config_test.go

diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index 266e88e33..1e4a886d3 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -242,10 +242,10 @@ func clusterCheckStoragePoolsMatch(cluster *db.Cluster, reqPools []api.StoragePo
 				return fmt.Errorf("Mismatching driver for storage pool %s", name)
 			}
 			// Exclude the "source" key, which is node-specific.
-			delete(pool.Config, "source")
-			delete(reqPool.Config, "source")
-			if !util.CompareConfigs(pool.Config, reqPool.Config) {
-				return fmt.Errorf("Mismatching config for storage pool %s", name)
+			exclude := []string{"source"}
+			err = util.CompareConfigs(pool.Config, reqPool.Config, exclude)
+			if err != nil {
+				return fmt.Errorf("Mismatching config for storage pool %s: %v", name, err)
 			}
 			break
 		}
@@ -273,10 +273,10 @@ func clusterCheckNetworksMatch(cluster *db.Cluster, reqNetworks []api.Network) e
 				return err
 			}
 			// Exclude the "bridge.external_interfaces" key, which is node-specific.
-			delete(network.Config, "bridge.external_interfaces")
-			delete(reqNetwork.Config, "bridge.external_interfaces")
-			if !util.CompareConfigs(network.Config, reqNetwork.Config) {
-				return fmt.Errorf("Mismatching config for network %s", name)
+			exclude := []string{"bridge.external_interfaces"}
+			err = util.CompareConfigs(network.Config, reqNetwork.Config, exclude)
+			if err != nil {
+				return fmt.Errorf("Mismatching config for network %s: %v", name, err)
 			}
 			break
 		}
diff --git a/lxd/util/config.go b/lxd/util/config.go
index 782dca01e..4fb3dd6c8 100644
--- a/lxd/util/config.go
+++ b/lxd/util/config.go
@@ -1,16 +1,49 @@
 package util
 
-// CompareConfigs compares two config maps and returns true if they are equal.
-func CompareConfigs(config1, config2 map[string]string) bool {
+import (
+	"fmt"
+	"sort"
+	"strings"
+
+	"github.com/lxc/lxd/shared"
+)
+
+// CompareConfigs compares two config maps and returns an error if they differ.
+func CompareConfigs(config1, config2 map[string]string, exclude []string) error {
+	if exclude == nil {
+		exclude = []string{}
+	}
+
+	delta := []string{}
 	for key, value := range config1 {
+		if shared.StringInSlice(key, exclude) {
+			continue
+		}
 		if config2[key] != value {
-			return false
+			delta = append(delta, key)
 		}
 	}
 	for key, value := range config2 {
+		if shared.StringInSlice(key, exclude) {
+			continue
+		}
 		if config1[key] != value {
-			return false
+			present := false
+			for i := range delta {
+				if delta[i] == key {
+					present = true
+					break
+				}
+			}
+			if !present {
+				delta = append(delta, key)
+			}
 		}
 	}
-	return true
+	sort.Strings(delta)
+	if len(delta) > 0 {
+		return fmt.Errorf("different values for keys: %s", strings.Join(delta, ", "))
+	}
+
+	return nil
 }
diff --git a/lxd/util/config_test.go b/lxd/util/config_test.go
new file mode 100644
index 000000000..4373149d8
--- /dev/null
+++ b/lxd/util/config_test.go
@@ -0,0 +1,41 @@
+package util_test
+
+import (
+	"testing"
+
+	"github.com/lxc/lxd/lxd/util"
+	"github.com/mpvl/subtest"
+	"github.com/stretchr/testify/assert"
+)
+
+func Test_CompareConfigsMismatch(t *testing.T) {
+	cases := []struct {
+		config1 map[string]string
+		config2 map[string]string
+		error   string
+	}{
+		{
+			map[string]string{"foo": "bar"},
+			map[string]string{"foo": "egg"},
+			"different values for keys: foo",
+		},
+		{
+			map[string]string{"foo": "bar"},
+			map[string]string{"egg": "buz"},
+			"different values for keys: egg, foo",
+		},
+	}
+	for _, c := range cases {
+		subtest.Run(t, c.error, func(t *testing.T) {
+			err := util.CompareConfigs(c.config1, c.config2, nil)
+			assert.EqualError(t, err, c.error)
+		})
+	}
+}
+
+func Test_CompareConfigs(t *testing.T) {
+	config1 := map[string]string{"foo": "bar", "baz": "buz"}
+	config2 := map[string]string{"foo": "egg", "baz": "buz"}
+	err := util.CompareConfigs(config1, config2, []string{"foo"})
+	assert.NoError(t, err)
+}

From 72c8544bf9aaa73ff74b29adf8efd8441bd32041 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 17 Jan 2018 13:14:18 +0000
Subject: [PATCH 182/227] Make volatile.initial_source a node-specific storage
 config key

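The join-time filtering this enables can be sketched as follows (the key
list matches db.StoragePoolNodeConfigKeys from the diff):

    package main

    import "fmt"

    var storagePoolNodeConfigKeys = []string{"source", "volatile.initial_source"}

    // nodeSpecificConfig mirrors the filtering done in cluster.Join: when a
    // node joins, only the node-specific keys of a pool's config need to be
    // inserted for it, since the global keys are already in the cluster
    // database.
    func nodeSpecificConfig(config map[string]string) map[string]string {
        filtered := map[string]string{}
        for _, key := range storagePoolNodeConfigKeys {
            if value, ok := config[key]; ok {
                filtered[key] = value
            }
        }
        return filtered
    }

    func main() {
        config := map[string]string{
            "size":                    "123",
            "source":                  "/foo/bar",
            "volatile.initial_source": "/foo/bar",
        }
        fmt.Println(nodeSpecificConfig(config))
        // map[source:/foo/bar volatile.initial_source:/foo/bar]
    }
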
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_cluster.go        |  4 ++--
 lxd/cluster/membership.go | 10 +++++++---
 lxd/db/migration.go       | 17 ++++++++++++-----
 lxd/db/migration_test.go  | 22 ++++++++++++++++++++++
 lxd/db/storage_pools.go   |  9 ++++++++-
 lxd/storage_pools.go      | 23 +++++++++++++----------
 6 files changed, 64 insertions(+), 21 deletions(-)

diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index 1e4a886d3..c7f096721 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -241,8 +241,8 @@ func clusterCheckStoragePoolsMatch(cluster *db.Cluster, reqPools []api.StoragePo
 			if pool.Driver != reqPool.Driver {
 				return fmt.Errorf("Mismatching driver for storage pool %s", name)
 			}
-			// Exclude the "source" key, which is node-specific.
-			exclude := []string{"source"}
+			// Exclude the keys which are node-specific.
+			exclude := db.StoragePoolNodeConfigKeys
 			err = util.CompareConfigs(pool.Config, reqPool.Config, exclude)
 			if err != nil {
 				return fmt.Errorf("Mismatching config for storage pool %s: %v", name, err)
diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index fec67dda6..8560d9540 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -333,9 +333,13 @@ func Join(state *state.State, gateway *Gateway, cert *shared.CertInfo, name stri
 			if err != nil {
 				return errors.Wrap(err, "failed to add joining node to the pool")
 			}
-			// We only need to add the source key, since the other keys are global and
-			// are already there.
-			config = map[string]string{"source": config["source"]}
+			// We only need to add the node-specific keys, since
+			// the other keys are global and are already there.
+			for key := range config {
+				if !shared.StringInSlice(key, db.StoragePoolNodeConfigKeys) {
+					delete(config, key)
+				}
+			}
 			err = tx.StoragePoolConfigAdd(id, node.ID, config)
 			if err != nil {
 				return errors.Wrap(err, "failed to add joining node's pool config")
diff --git a/lxd/db/migration.go b/lxd/db/migration.go
index e09131706..f78821aea 100644
--- a/lxd/db/migration.go
+++ b/lxd/db/migration.go
@@ -127,15 +127,22 @@ func (c *Cluster) ImportPreClusteringData(dump *Dump) error {
 				}
 				appendNodeID()
 			case "storage_pools_config":
-				// The "source" config key is the only one
-				// which is not global to the cluster, so all
-				// other keys will have a NULL node_id.
+				// The keys listed in StoragePoolNodeConfigKeys
+				// are the only ones which are not global to the
+				// cluster, so all other keys will have a NULL
+				// node_id.
+				index := 0
 				for i, column := range columns {
-					if column == "key" && row[i] != "source" {
-						nullNodeID = true
+					if column == "key" {
+						index = i
 						break
 					}
 				}
+				key := row[index].(string)
+				if !shared.StringInSlice(key, StoragePoolNodeConfigKeys) {
+					nullNodeID = true
+					break
+				}
 				appendNodeID()
 			case "storage_volumes_config":
 				appendNodeID()
diff --git a/lxd/db/migration_test.go b/lxd/db/migration_test.go
index c97e17cd3..561a76dbb 100644
--- a/lxd/db/migration_test.go
+++ b/lxd/db/migration_test.go
@@ -5,6 +5,7 @@ import (
 	"testing"
 
 	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/db/query"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
@@ -83,6 +84,7 @@ func TestImportPreClusteringData(t *testing.T) {
 	assert.Equal(t, int64(1), id)
 	assert.Equal(t, "/foo/bar", pool.Config["source"])
 	assert.Equal(t, "123", pool.Config["size"])
+	assert.Equal(t, "/foo/bar", pool.Config["volatile.initial_source"])
 	assert.Equal(t, "CREATED", pool.State)
 	assert.Equal(t, []string{"none"}, pool.Nodes)
 	volumes, err := cluster.StoragePoolVolumesGet(id, []int{1})
@@ -90,6 +92,25 @@ func TestImportPreClusteringData(t *testing.T) {
 	assert.Len(t, volumes, 1)
 	assert.Equal(t, "/foo/bar", volumes[0].Config["source"])
 
+	err = cluster.Transaction(func(tx *db.ClusterTx) error {
+		// The size config got a NULL node_id, since it's cluster global.
+		config, err := query.SelectConfig(tx.Tx(), "storage_pools_config", "node_id IS NULL")
+		require.NoError(t, err)
+		assert.Equal(t, map[string]string{"size": "123"}, config)
+
+		// The other config keys are node-specific.
+		config, err = query.SelectConfig(tx.Tx(), "storage_pools_config", "node_id=?", 1)
+		require.NoError(t, err)
+		assert.Equal(t,
+			map[string]string{
+				"volatile.initial_source": "/foo/bar",
+				"source":                  "/foo/bar",
+			}, config)
+
+		return nil
+	})
+	require.NoError(t, err)
+
 	// profiles
 	profiles, err := cluster.Profiles()
 	require.NoError(t, err)
@@ -148,6 +169,7 @@ func newPreClusteringTx(t *testing.T) *sql.Tx {
 		"INSERT INTO storage_pools VALUES (1, 'default', 'dir', '')",
 		"INSERT INTO storage_pools_config VALUES(1, 1, 'source', '/foo/bar')",
 		"INSERT INTO storage_pools_config VALUES(2, 1, 'size', '123')",
+		"INSERT INTO storage_pools_config VALUES(3, 1, 'volatile.initial_source', '/foo/bar')",
 		"INSERT INTO storage_volumes VALUES (1, 'dev', 1, 1, '')",
 		"INSERT INTO storage_volumes_config VALUES(1, 1, 'source', '/foo/bar')",
 	}
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index fe3e259cc..a4a5754d9 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -471,7 +471,7 @@ func storagePoolConfigAdd(tx *sql.Tx, poolID, nodeID int64, poolConfig map[strin
 			continue
 		}
 		var nodeIDValue interface{}
-		if k != "source" {
+		if !shared.StringInSlice(k, StoragePoolNodeConfigKeys) {
 			nodeIDValue = nil
 		} else {
 			nodeIDValue = nodeID
@@ -789,6 +789,13 @@ const (
 	StoragePoolVolumeTypeNameCustom    string = "custom"
 )
 
+// StoragePoolNodeConfigKeys lists all storage pool config keys which are
+// node-specific.
+var StoragePoolNodeConfigKeys = []string{
+	"source",
+	"volatile.initial_source",
+}
+
 // StoragePoolVolumeTypeToName converts a volume integer type code to its
 // human-readable name.
 func StoragePoolVolumeTypeToName(volumeType int) (string, error) {
diff --git a/lxd/storage_pools.go b/lxd/storage_pools.go
index 87b4feafc..509278851 100644
--- a/lxd/storage_pools.go
+++ b/lxd/storage_pools.go
@@ -13,6 +13,7 @@ import (
 	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/util"
+	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
 	"github.com/lxc/lxd/shared/version"
 )
@@ -131,10 +132,10 @@ func storagePoolsPost(d *Daemon, r *http.Request) Response {
 	}
 
 	// A targetNode was specified, let's just define the node's storage
-	// without actually creating it. The only legal key value for the
-	// storage config is 'source'.
+	// without actually creating it. The only legal keys for the storage
+	// config are the node-specific ones in StoragePoolNodeConfigKeys.
 	for key := range req.Config {
-		if key != "source" {
+		if !shared.StringInSlice(key, db.StoragePoolNodeConfigKeys) {
 			return SmartError(fmt.Errorf("Invalid config key '%s'", key))
 		}
 	}
@@ -153,11 +154,10 @@ func storagePoolsPost(d *Daemon, r *http.Request) Response {
 }
 
 func storagePoolsPostCluster(d *Daemon, req api.StoragePoolsPost) error {
-	// Check that no 'source' config key has been defined, since
-	// that's node-specific.
+	// Check that no node-specific config key has been defined.
 	for key := range req.Config {
-		if key == "source" {
-			return fmt.Errorf("Config key 'source' is node-specific")
+		if shared.StringInSlice(key, db.StoragePoolNodeConfigKeys) {
+			return fmt.Errorf("Config key '%s' is node-specific", key)
 		}
 	}
 
@@ -257,14 +257,17 @@ func storagePoolGet(d *Daemon, r *http.Request) Response {
 
 	targetNode := r.FormValue("targetNode")
 
-	// If no target node is specified and the client is clustered, we omit
-	// the node-specific fields, namely "source"
 	clustered, err := cluster.Enabled(d.db)
 	if err != nil {
 		return SmartError(err)
 	}
+
+	// If no target node is specified and the client is clustered, we omit
+	// the node-specific fields.
 	if targetNode == "" && clustered {
-		delete(pool.Config, "source")
+		for _, key := range db.StoragePoolNodeConfigKeys {
+			delete(pool.Config, key)
+		}
 	}
 
 	// If a target was specified, forward the request to the relevant node.

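Note that the same key list is applied in two directions: a joining node
keeps only the node-specific keys when inheriting an existing pool
definition, while the cluster-wide view drops them. A self-contained sketch
of both filters, assuming the two keys this patch puts in
db.StoragePoolNodeConfigKeys:

package main

import "fmt"

// Assumed to mirror db.StoragePoolNodeConfigKeys as of this patch.
var nodeConfigKeys = []string{"source", "volatile.initial_source"}

func isNodeSpecific(key string) bool {
	for _, k := range nodeConfigKeys {
		if k == key {
			return true
		}
	}
	return false
}

// keepNodeSpecific keeps only node-specific keys, as done when a joining
// node copies an existing pool definition (the global keys already exist).
func keepNodeSpecific(config map[string]string) {
	for key := range config {
		if !isNodeSpecific(key) {
			delete(config, key)
		}
	}
}

// dropNodeSpecific removes node-specific keys, as done when serving a
// cluster-wide view of a pool without a target node.
func dropNodeSpecific(config map[string]string) {
	for _, key := range nodeConfigKeys {
		delete(config, key)
	}
}

func main() {
	config := map[string]string{"source": "/var/lib/lxd/pool1", "size": "10GB"}
	keepNodeSpecific(config)
	fmt.Println(config) // map[source:/var/lib/lxd/pool1]

	config = map[string]string{"source": "/var/lib/lxd/pool1", "size": "10GB"}
	dropNodeSpecific(config)
	fmt.Println(config) // map[size:10GB]
}
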
From c3adf15065ceb96827efe2ec1a5f5fbdffad9f68 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 17 Jan 2018 16:16:16 +0000
Subject: [PATCH 183/227] Don't return node-specific keys in storage pools of
 GET /1.0/cluster

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_cluster.go      |  6 +++++-
 lxd/api_cluster_test.go | 30 ++++++++++++++++++++++++++++++
 2 files changed, 35 insertions(+), 1 deletion(-)

diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index c7f096721..23f340393 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -57,7 +57,7 @@ func clusterGet(d *Daemon, r *http.Request) Response {
 
 	// Fill the StoragePools attribute
 	pools, err := d.cluster.StoragePools()
-	if err != nil {
+	if err != nil && err != db.NoSuchObjectError {
 		return SmartError(err)
 	}
 	for _, name := range pools {
@@ -65,6 +65,10 @@ func clusterGet(d *Daemon, r *http.Request) Response {
 		if err != nil {
 			return SmartError(err)
 		}
+		// Remove node-specific keys
+		for _, key := range db.StoragePoolNodeConfigKeys {
+			delete(pool.Config, key)
+		}
 		cluster.StoragePools = append(cluster.StoragePools, *pool)
 	}
 
diff --git a/lxd/api_cluster_test.go b/lxd/api_cluster_test.go
index 315f13b7c..3104968cf 100644
--- a/lxd/api_cluster_test.go
+++ b/lxd/api_cluster_test.go
@@ -2,6 +2,8 @@ package main
 
 import (
 	"fmt"
+	"os"
+	"path/filepath"
 	"testing"
 
 	lxd "github.com/lxc/lxd/client"
@@ -33,6 +35,34 @@ func TestCluster_Bootstrap(t *testing.T) {
 	assert.Equal(t, "buzz", client.ClusterNodeName())
 }
 
+func TestCluster_Get(t *testing.T) {
+	daemon, cleanup := newDaemon(t)
+	defer cleanup()
+
+	client, err := lxd.ConnectLXDUnix(daemon.UnixSocket(), nil)
+	require.NoError(t, err)
+
+	// Create a pool and check that the information returned by GetCluster
+	// does not contain node-specific keys.
+	os.Setenv("LXD_DIR", filepath.Join(daemon.State().OS.VarDir))
+	pool := api.StoragePoolsPost{
+		Name:   "mypool",
+		Driver: "dir",
+	}
+	pool.Config = map[string]string{
+		"source": "",
+	}
+	err = client.CreateStoragePool(pool)
+	require.NoError(t, err)
+
+	cluster, err := client.GetCluster("")
+	require.NoError(t, err)
+	assert.Len(t, cluster.StoragePools, 1)
+
+	_, ok := cluster.StoragePools[0].Config["source"]
+	assert.False(t, ok, "should have not contained the node-specific 'source' key")
+}
+
 // A LXD node which is already configured for networking can join an existing
 // cluster.
 func TestCluster_Join(t *testing.T) {

From 744a3133cb5b8c34faf83f764572e4f2cba5b935 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 18 Jan 2018 08:48:08 +0000
Subject: [PATCH 184/227] Close and re-open the cluster database when a node
 leaves

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_cluster.go        | 16 ++++++++++++++++
 lxd/api_cluster_test.go   |  9 +++++++++
 test/suites/clustering.sh |  1 +
 3 files changed, 26 insertions(+)

diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index 23f340393..831c88213 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -77,6 +77,12 @@ func clusterGet(d *Daemon, r *http.Request) Response {
 
 // Disable clustering on a node.
 func clusterDelete(d *Daemon, r *http.Request) Response {
+	// Close the cluster database
+	err := d.cluster.Close()
+	if err != nil {
+		return SmartError(err)
+	}
+
 	// Update our TLS configuration using our original certificate.
 	for _, suffix := range []string{"crt", "key", "ca"} {
 		path := filepath.Join(d.os.VarDir, "cluster."+suffix)
@@ -100,6 +106,16 @@ func clusterDelete(d *Daemon, r *http.Request) Response {
 		return SmartError(err)
 	}
 
+	// Re-open the cluster database
+	address, err := node.HTTPSAddress(d.db)
+	if err != nil {
+		return SmartError(err)
+	}
+	d.cluster, err = db.OpenCluster("db.bin", d.gateway.Dialer(), address)
+	if err != nil {
+		return SmartError(err)
+	}
+
 	return EmptySyncResponse
 }
 
diff --git a/lxd/api_cluster_test.go b/lxd/api_cluster_test.go
index 3104968cf..7962ef63e 100644
--- a/lxd/api_cluster_test.go
+++ b/lxd/api_cluster_test.go
@@ -200,6 +200,15 @@ func TestCluster_Leave(t *testing.T) {
 	client := f.ClientUnix(daemons[1])
 	err := client.LeaveCluster("rusp-0", false)
 	require.NoError(t, err)
+
+	_, _, err = client.GetServer()
+	require.NoError(t, err)
+	assert.False(t, client.IsClustered())
+
+	nodes, err := client.GetNodes()
+	require.NoError(t, err)
+	assert.Len(t, nodes, 1)
+	assert.Equal(t, "none", nodes[0].Name)
 }
 
 // A LXD node can be renamed.
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 6651544b9..917e1dbb0 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -88,6 +88,7 @@ test_clustering_membership() {
 
   # Remove a node gracefully.
   LXD_DIR="${LXD_FOUR_DIR}" lxc cluster delete node5
+  LXD_DIR="${LXD_FOUR_DIR}" lxc cluster list | grep -q "https://0.0.0.0"
 
   LXD_DIR="${LXD_FOUR_DIR}" lxd shutdown
   LXD_DIR="${LXD_THREE_DIR}" lxd shutdown

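The ordering here matters: the old cluster handle is closed before the TLS
material is swapped back, and a fresh handle is opened only once the daemon
is on its stand-alone address again, so no query can run against a stale
connection. A generic sketch of that close/reconfigure/re-open pattern,
simplified from the real code, which goes through db.OpenCluster and the
gateway dialer:

package main

import (
	"database/sql"
	"fmt"

	_ "github.com/mattn/go-sqlite3"
)

// reopen closes the current handle, runs a reconfiguration step, and only
// then opens a fresh handle against the (possibly changed) DSN.
func reopen(old *sql.DB, reconfigure func() error, dsn string) (*sql.DB, error) {
	if err := old.Close(); err != nil {
		return nil, fmt.Errorf("close old handle: %v", err)
	}
	if err := reconfigure(); err != nil {
		return nil, fmt.Errorf("reconfigure: %v", err)
	}
	return sql.Open("sqlite3", dsn)
}

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		panic(err)
	}
	db, err = reopen(db, func() error {
		// e.g. restore the pre-clustering TLS certificate here
		return nil
	}, ":memory:")
	if err != nil {
		panic(err)
	}
	defer db.Close()
	fmt.Println("handle re-opened")
}
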
From 3e2290feed9f9cea7254f4cddcdfd7b4be44f74f Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 18 Jan 2018 12:01:15 +0000
Subject: [PATCH 185/227] Improve error message when trying to delete a
 non-empty node

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/membership.go |  6 ++---
 lxd/db/node.go            | 58 +++++++++++++++++++++++++++++++++++++----------
 lxd/db/node_test.go       | 48 ++++++++++++++++++++++++++++++++-------
 3 files changed, 89 insertions(+), 23 deletions(-)

diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index 8560d9540..17f227e92 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -651,12 +651,12 @@ func membershipCheckClusterStateForAccept(tx *db.ClusterTx, name string, address
 // Check that cluster-related preconditions are met for leaving a cluster.
 func membershipCheckClusterStateForLeave(tx *db.ClusterTx, nodeID int64) error {
 	// Check that it has no containers or images.
-	empty, err := tx.NodeIsEmpty(nodeID)
+	message, err := tx.NodeIsEmpty(nodeID)
 	if err != nil {
 		return err
 	}
-	if !empty {
-		return fmt.Errorf("node has containers or images")
+	if message != "" {
+		return fmt.Errorf("%s", message)
 	}
 
 	// Check that it's not the last node.
diff --git a/lxd/db/node.go b/lxd/db/node.go
index cfd1a03a1..e0c0cec7f 100644
--- a/lxd/db/node.go
+++ b/lxd/db/node.go
@@ -3,6 +3,7 @@ package db
 import (
 	"fmt"
 	"strconv"
+	"strings"
 	"time"
 
 	"github.com/lxc/lxd/lxd/db/cluster"
@@ -233,26 +234,59 @@ func (c *ClusterTx) NodeHeartbeat(address string, heartbeat time.Time) error {
 	return nil
 }
 
-// NodeIsEmpty returns true if the node with the given ID has no containers or
-// images associated with it.
-func (c *ClusterTx) NodeIsEmpty(id int64) (bool, error) {
-	n, err := query.Count(c.tx, "containers", "node_id=?", id)
+// NodeIsEmpty returns an empty string if the node with the given ID has no
+// containers or images associated with it. Otherwise, it returns a message
+// saying what's left.
+func (c *ClusterTx) NodeIsEmpty(id int64) (string, error) {
+	containers, err := query.SelectStrings(c.tx, "SELECT name FROM containers WHERE node_id=?", id)
 	if err != nil {
-		return false, errors.Wrapf(err, "failed to get containers count for node %d", id)
+		return "", errors.Wrapf(err, "failed to get containers for node %d", id)
 	}
-	if n > 0 {
-		return false, nil
+	if len(containers) > 0 {
+		message := fmt.Sprintf(
+			"node still has the following containers: %s", strings.Join(containers, ", "))
+		return message, nil
 	}
 
-	n, err = query.Count(c.tx, "images_nodes", "node_id=?", id)
+	images := []struct {
+		fingerprint string
+		nodeID      int64
+	}{}
+	dest := func(i int) []interface{} {
+		images = append(images, struct {
+			fingerprint string
+			nodeID      int64
+		}{})
+		return []interface{}{&images[i].fingerprint, &images[i].nodeID}
+	}
+	err = query.SelectObjects(c.tx, dest, `
+SELECT fingerprint, node_id FROM images JOIN images_nodes ON images.id=images_nodes.image_id`)
 	if err != nil {
-		return false, errors.Wrapf(err, "failed to get images count for node %d", id)
+		return "", errors.Wrapf(err, "failed to get image list for node %d", id)
 	}
-	if n > 0 {
-		return false, nil
+	index := map[string][]int64{} // Map fingerprints to IDs of nodes
+	for _, image := range images {
+		index[image.fingerprint] = append(index[image.fingerprint], image.nodeID)
+	}
+
+	fingerprints := []string{}
+	for fingerprint, ids := range index {
+		if len(ids) > 1 {
+			continue
+		}
+		if ids[0] == id {
+			fingerprints = append(fingerprints, fingerprint)
+		}
+	}
+
+	if len(fingerprints) > 0 {
+		message := fmt.Sprintf(
+			"node still has the following images: %s", strings.Join(fingerprints, ", "))
+		return message, nil
 	}
 
-	return true, nil
+	return "", nil
 }
 
 // NodeClear removes any container or image associated with this node.
diff --git a/lxd/db/node_test.go b/lxd/db/node_test.go
index 619c2ed73..7c4bf36fa 100644
--- a/lxd/db/node_test.go
+++ b/lxd/db/node_test.go
@@ -124,31 +124,63 @@ func TestNodeHeartbeat(t *testing.T) {
 	assert.True(t, node.IsOffline(20*time.Second))
 }
 
-// A node is considered empty only if it has no containers and no images.
-func TestNodeIsEmpty(t *testing.T) {
+// A node is considered empty only if it has no containers.
+func TestNodeIsEmpty_Containers(t *testing.T) {
 	tx, cleanup := db.NewTestClusterTx(t)
 	defer cleanup()
 
 	id, err := tx.NodeAdd("buzz", "1.2.3.4:666")
 	require.NoError(t, err)
 
-	empty, err := tx.NodeIsEmpty(id)
+	message, err := tx.NodeIsEmpty(id)
 	require.NoError(t, err)
-	assert.True(t, empty)
+	assert.Equal(t, "", message)
 
 	_, err = tx.Tx().Exec(`
 INSERT INTO containers (id, node_id, name, architecture, type) VALUES (1, ?, 'foo', 1, 1)
 `, id)
 	require.NoError(t, err)
 
-	empty, err = tx.NodeIsEmpty(id)
+	message, err = tx.NodeIsEmpty(id)
 	require.NoError(t, err)
-	assert.False(t, empty)
+	assert.Equal(t, "node still has the following containers: foo", message)
 
 	err = tx.NodeClear(id)
 	require.NoError(t, err)
 
-	empty, err = tx.NodeIsEmpty(id)
+	message, err = tx.NodeIsEmpty(id)
 	require.NoError(t, err)
-	assert.True(t, empty)
+	assert.Equal(t, "", message)
+}
+
+// A node is considered empty only if it has no images that are available only
+// on that node.
+func TestNodeIsEmpty_Images(t *testing.T) {
+	tx, cleanup := db.NewTestClusterTx(t)
+	defer cleanup()
+
+	id, err := tx.NodeAdd("buzz", "1.2.3.4:666")
+	require.NoError(t, err)
+
+	_, err = tx.Tx().Exec(`
+INSERT INTO images (id, fingerprint, filename, size, architecture, upload_date)
+  VALUES (1, 'abc', 'foo', 123, 1, ?)`, time.Now())
+	require.NoError(t, err)
+
+	_, err = tx.Tx().Exec(`
+INSERT INTO images_nodes(image_id, node_id) VALUES(1, ?)`, id)
+	require.NoError(t, err)
+
+	message, err := tx.NodeIsEmpty(id)
+	require.NoError(t, err)
+	assert.Equal(t, "node still has the following images: abc", message)
+
+	// Insert a new image entry for node 1 (the default node).
+	_, err = tx.Tx().Exec(`
+INSERT INTO images_nodes(image_id, node_id) VALUES(1, 1)`)
+	require.NoError(t, err)
+
+	message, err = tx.NodeIsEmpty(id)
+	require.NoError(t, err)
+	assert.Equal(t, "", message)
 }

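The subtle part of the image check is that an image only blocks a graceful
leave when the leaving node holds the sole copy. The grouping logic used by
the fingerprint index above, sketched outside of SQL:

package main

import (
	"fmt"
	"sort"
	"strings"
)

type imageRow struct {
	fingerprint string
	nodeID      int64
}

// soleImages returns the fingerprints for which nodeID is the only holder,
// i.e. the images that block a graceful leave.
func soleImages(rows []imageRow, nodeID int64) []string {
	index := map[string][]int64{}
	for _, row := range rows {
		index[row.fingerprint] = append(index[row.fingerprint], row.nodeID)
	}

	fingerprints := []string{}
	for fingerprint, ids := range index {
		if len(ids) == 1 && ids[0] == nodeID {
			fingerprints = append(fingerprints, fingerprint)
		}
	}
	sort.Strings(fingerprints)
	return fingerprints
}

func main() {
	rows := []imageRow{{"abc", 2}, {"def", 1}, {"def", 2}}
	if blocked := soleImages(rows, 2); len(blocked) > 0 {
		fmt.Printf("node still has the following images: %s\n",
			strings.Join(blocked, ", ")) // abc: only node 2 has it
	}
}
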
From d17affd97b65d51586765a3d4682ae6d09c5730c Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 18 Jan 2018 12:24:42 +0000
Subject: [PATCH 186/227] Delete images left with no copies in the cluster when
 a node leaves

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_cluster_test.go   | 60 +++++++++++++++++++++++++++++++++++++++++++++++
 lxd/cluster/membership.go |  8 ++++---
 lxd/db/node.go            | 24 +++++++++++++++++++
 3 files changed, 89 insertions(+), 3 deletions(-)

diff --git a/lxd/api_cluster_test.go b/lxd/api_cluster_test.go
index 7962ef63e..cad349962 100644
--- a/lxd/api_cluster_test.go
+++ b/lxd/api_cluster_test.go
@@ -5,6 +5,7 @@ import (
 	"os"
 	"path/filepath"
 	"testing"
+	"time"
 
 	lxd "github.com/lxc/lxd/client"
 	"github.com/lxc/lxd/lxd/db"
@@ -211,6 +212,65 @@ func TestCluster_Leave(t *testing.T) {
 	assert.Equal(t, "none", nodes[0].Name)
 }
 
+// A node can't leave a cluster gracefully if it still has images associated
+// with it.
+func TestCluster_LeaveWithImages(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping cluster leave test in short mode.")
+	}
+	daemons, cleanup := newDaemons(t, 2)
+	defer cleanup()
+
+	f := clusterFixture{t: t}
+	f.FormCluster(daemons)
+
+	daemon := daemons[1]
+	err := daemon.State().Cluster.ImageInsert(
+		"abc", "foo", 123, false, false, "amd64", time.Now(), time.Now(), nil)
+	require.NoError(t, err)
+
+	client := f.ClientUnix(daemons[1])
+	err = client.LeaveCluster("rusp-0", false)
+	assert.EqualError(t, err, "node still has the following images: abc")
+
+	// If we now associate the image with the other node as well, leaving
+	// the cluster is fine.
+	daemon = daemons[0]
+	err = daemon.State().Cluster.ImageAssociateNode("abc")
+	require.NoError(t, err)
+
+	err = client.LeaveCluster("rusp-0", false)
+	assert.NoError(t, err)
+}
+
+// The force flag makes a node leave even if it still has images.
+func TestCluster_LeaveForce(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping cluster leave test in short mode.")
+	}
+	daemons, cleanup := newDaemons(t, 2)
+	defer cleanup()
+
+	f := clusterFixture{t: t}
+	f.FormCluster(daemons)
+
+	daemon := daemons[1]
+	err := daemon.State().Cluster.ImageInsert(
+		"abc", "foo", 123, false, false, "amd64", time.Now(), time.Now(), nil)
+	require.NoError(t, err)
+
+	client := f.ClientUnix(daemons[1])
+	err = client.LeaveCluster("rusp-0", true)
+	assert.NoError(t, err)
+
+	// The image is gone, since the deleted node was the only one having a
+	// copy of it.
+	daemon = daemons[0]
+	images, err := daemon.State().Cluster.ImagesGet(false)
+	require.NoError(t, err)
+	assert.Equal(t, []string{}, images)
+}
+
 // A LXD node can be renamed.
 func TestCluster_NodeRename(t *testing.T) {
 	daemon, cleanup := newDaemon(t)
diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index 17f227e92..07f2573da 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -404,10 +404,12 @@ func Leave(state *state.State, gateway *Gateway, name string, force bool) (strin
 
 		// Check that the node is eligible for leaving.
 		if !force {
-			err = membershipCheckClusterStateForLeave(tx, node.ID)
-		} else {
-			err = tx.NodeClear(node.ID)
+			err := membershipCheckClusterStateForLeave(tx, node.ID)
+			if err != nil {
+				return err
+			}
 		}
+		err = tx.NodeClear(node.ID)
 		if err != nil {
 			return err
 		}
diff --git a/lxd/db/node.go b/lxd/db/node.go
index e0c0cec7f..184a1d11d 100644
--- a/lxd/db/node.go
+++ b/lxd/db/node.go
@@ -238,6 +238,7 @@ func (c *ClusterTx) NodeHeartbeat(address string, heartbeat time.Time) error {
 // containers or images associated with it. Otherwise, it returns a message
 // saying what's left.
 func (c *ClusterTx) NodeIsEmpty(id int64) (string, error) {
+	// Check if the node has any containers.
 	containers, err := query.SelectStrings(c.tx, "SELECT name FROM containers WHERE node_id=?", id)
 	if err != nil {
 		return "", errors.Wrapf(err, "failed to get containers for node %d", id)
@@ -248,6 +249,7 @@ func (c *ClusterTx) NodeIsEmpty(id int64) (string, error) {
 		return message, nil
 	}
 
+	// Check if the node has any images available only on this node.
 	images := []struct {
 		fingerprint string
 		nodeID      int64
@@ -296,11 +298,33 @@ func (c *ClusterTx) NodeClear(id int64) error {
 		return err
 	}
 
+	// Get the IDs of the images this node is hosting.
+	ids, err := query.SelectIntegers(c.tx, "SELECT image_id FROM images_nodes WHERE node_id=?", id)
+	if err != nil {
+		return err
+	}
+
+	// Delete the association
 	_, err = c.tx.Exec("DELETE FROM images_nodes WHERE node_id=?", id)
 	if err != nil {
 		return err
 	}
 
+	// Delete the image as well if this was the only node with it.
+	for _, id := range ids {
+		count, err := query.Count(c.tx, "images_nodes", "image_id=?", id)
+		if err != nil {
+			return err
+		}
+		if count > 0 {
+			continue
+		}
+		_, err = c.tx.Exec("DELETE FROM images WHERE id=?", id)
+		if err != nil {
+			return err
+		}
+	}
+
 	return nil
 }
 

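With this patch NodeClear also garbage-collects images that lose their last
holder. A sketch of that clean-up written against database/sql directly
(the real code goes through the lxd/db/query helpers):

package main

import (
	"database/sql"
	"fmt"

	_ "github.com/mattn/go-sqlite3"
)

// clearNodeImages removes a node's image associations and deletes any image
// left with no holder at all.
func clearNodeImages(tx *sql.Tx, nodeID int64) error {
	rows, err := tx.Query("SELECT image_id FROM images_nodes WHERE node_id=?", nodeID)
	if err != nil {
		return err
	}
	ids := []int64{}
	for rows.Next() {
		var id int64
		if err := rows.Scan(&id); err != nil {
			rows.Close()
			return err
		}
		ids = append(ids, id)
	}
	rows.Close()

	if _, err := tx.Exec("DELETE FROM images_nodes WHERE node_id=?", nodeID); err != nil {
		return err
	}

	// Delete each image that now has zero remaining holders.
	for _, imageID := range ids {
		var count int
		err := tx.QueryRow("SELECT COUNT(*) FROM images_nodes WHERE image_id=?", imageID).Scan(&count)
		if err != nil {
			return err
		}
		if count == 0 {
			if _, err := tx.Exec("DELETE FROM images WHERE id=?", imageID); err != nil {
				return err
			}
		}
	}
	return nil
}

func main() {
	db, _ := sql.Open("sqlite3", ":memory:")
	defer db.Close()
	db.Exec("CREATE TABLE images (id INTEGER PRIMARY KEY)")
	db.Exec("CREATE TABLE images_nodes (image_id INTEGER, node_id INTEGER)")
	db.Exec("INSERT INTO images VALUES (1)")
	db.Exec("INSERT INTO images_nodes VALUES (1, 2)")

	tx, _ := db.Begin()
	if err := clearNodeImages(tx, 2); err != nil {
		panic(err)
	}
	tx.Commit()

	var n int
	db.QueryRow("SELECT COUNT(*) FROM images").Scan(&n)
	fmt.Println("images left:", n) // 0, node 2 held the only copy
}
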
From a464154c40916950e365fbf7b5b436381cfcdc74 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 18 Jan 2018 14:49:30 +0000
Subject: [PATCH 187/227] Remove networks and storage pools from deleted nodes

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_cluster.go             | 62 ++++++++++++++++++++++++++++++------------
 lxd/cluster/membership.go      | 42 ++++++++++++++++++++--------
 lxd/cluster/membership_test.go |  2 ++
 lxd/storage_pools.go           |  2 +-
 test/suites/clustering.sh      |  9 ++++++
 5 files changed, 87 insertions(+), 30 deletions(-)

diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index 831c88213..eb8e8a8f6 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -484,38 +484,64 @@ func clusterNodeDelete(d *Daemon, r *http.Request) Response {
 		force = 0
 	}
 
+	// First check that the node is clear of containers and images and
+	// make it leave the database cluster, if it's part of it.
 	name := mux.Vars(r)["name"]
 	address, err := cluster.Leave(d.State(), d.gateway, name, force == 1)
 	if err != nil {
 		return SmartError(err)
 	}
 
-	var run func(op *operation) error
-
-	if force == 1 {
-		// If the force flag is on, the returned operation is a no-op.
-		run = func(op *operation) error {
-			return nil
+	if force != 1 {
+		// Try to gracefully delete all networks and storage pools on it.
+		// Delete all networks on this node
+		cert := d.endpoints.NetworkCert()
+		client, err := cluster.Connect(address, cert, true)
+		if err != nil {
+			return SmartError(err)
+		}
+		networks, err := d.cluster.Networks()
+		if err != nil {
+			return SmartError(err)
+		}
+		for _, name := range networks {
+			err := client.DeleteNetwork(name)
+			if err != nil {
+				return SmartError(err)
+			}
 		}
 
-	} else {
-		// Try to gracefully disable clustering on the target node.
-		cert := d.endpoints.NetworkCert()
-		run = func(op *operation) error {
-			// First request for this node to be added to the list of
-			// cluster nodes.
-			client, err := cluster.Connect(address, cert, false)
+		// Delete all the pools on this node
+		pools, err := d.cluster.StoragePools()
+		if err != nil && err != db.NoSuchObjectError {
+			return SmartError(err)
+		}
+		for _, name := range pools {
+			err := client.DeleteStoragePool(name)
 			if err != nil {
-				return err
+				return SmartError(err)
 			}
-			_, _, err = client.RawQuery("DELETE", "/1.0/cluster", nil, "")
-			return err
 		}
 	}
 
-	err = run(nil)
+	// Remove node from the database
+	err = cluster.Purge(d.cluster, name)
 	if err != nil {
-		return SmartError(err)
+		return SmartError(errors.Wrap(err, "failed to remove node from database"))
 	}
+
+	if force != 1 {
+		// Try to gracefully reset the database on the node.
+		cert := d.endpoints.NetworkCert()
+		client, err := cluster.Connect(address, cert, false)
+		if err != nil {
+			return SmartError(err)
+		}
+		_, _, err = client.RawQuery("DELETE", "/1.0/cluster", nil, "")
+		if err != nil {
+			return SmartError(errors.Wrap(err, "failed to cleanup the node"))
+		}
+	}
+
 	return EmptySyncResponse
 }
diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index 07f2573da..c42738ab1 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -388,12 +388,17 @@ func Join(state *state.State, gateway *Gateway, cert *shared.CertInfo, name stri
 
 // Leave a cluster.
 //
-// If the force flag is true, the node will be removed even if it still has
+// If the force flag is true, the node will leave even if it still has
 // containers and images.
 //
+// The node will only leave the raft cluster, and won't be removed from the
+// database. That's done by Purge().
+//
 // Upon success, return the address of the leaving node.
 func Leave(state *state.State, gateway *Gateway, name string, force bool) (string, error) {
-	// Delete the node from the cluster and track its address.
+	logger.Debugf("Make node %s leave the cluster", name)
+
+	// Check if the node can be deleted and track its address.
 	var address string
 	err := state.Cluster.Transaction(func(tx *db.ClusterTx) error {
 		// Get the node (if it doesn't exist an error is returned).
@@ -409,16 +414,7 @@ func Leave(state *state.State, gateway *Gateway, name string, force bool) (strin
 				return err
 			}
 		}
-		err = tx.NodeClear(node.ID)
-		if err != nil {
-			return err
-		}
 
-		// Actually remove the node from the cluster database.
-		err = tx.NodeRemove(node.ID)
-		if err != nil {
-			return err
-		}
 		address = node.Address
 		return nil
 	})
@@ -468,6 +464,30 @@ func Leave(state *state.State, gateway *Gateway, name string, force bool) (strin
 	return address, nil
 }
 
+// Purge removes a node entirely from the cluster database.
+func Purge(cluster *db.Cluster, name string) error {
+	logger.Debugf("Remove node %s from the database", name)
+
+	return cluster.Transaction(func(tx *db.ClusterTx) error {
+		// Get the node (if it doesn't exist an error is returned).
+		node, err := tx.NodeByName(name)
+		if err != nil {
+			return errors.Wrapf(err, "failed to get node %s", name)
+		}
+
+		err = tx.NodeClear(node.ID)
+		if err != nil {
+			return errors.Wrapf(err, "failed to clear node %s", name)
+		}
+
+		err = tx.NodeRemove(node.ID)
+		if err != nil {
+			return errors.Wrapf(err, "failed to remove node %s", name)
+		}
+		return nil
+	})
+}
+
 // List the nodes of the cluster.
 func List(state *state.State) ([]api.Node, error) {
 	addresses := []string{} // Addresses of database nodes
diff --git a/lxd/cluster/membership_test.go b/lxd/cluster/membership_test.go
index 4e4bf362f..cd72744d6 100644
--- a/lxd/cluster/membership_test.go
+++ b/lxd/cluster/membership_test.go
@@ -325,6 +325,8 @@ func TestJoin(t *testing.T) {
 	leaving, err := cluster.Leave(state, gateway, "rusp", false /* force */)
 	require.NoError(t, err)
 	assert.Equal(t, address, leaving)
+	err = cluster.Purge(state.Cluster, "rusp")
+	require.NoError(t, err)
 
 	// The node has gone from the cluster db.
 	err = targetState.Cluster.Transaction(func(tx *db.ClusterTx) error {
diff --git a/lxd/storage_pools.go b/lxd/storage_pools.go
index 509278851..4611ae648 100644
--- a/lxd/storage_pools.go
+++ b/lxd/storage_pools.go
@@ -410,7 +410,7 @@ func storagePoolDelete(d *Daemon, r *http.Request) Response {
 		return SmartError(err)
 	}
 
-	if len(profiles) > 0 {
+	if len(profiles) > 0 && !isClusterNotification(r) {
 		return BadRequest(fmt.Errorf("Storage pool \"%s\" has profiles using it:\n%s", poolName, strings.Join(profiles, "\n")))
 	}
 
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 917e1dbb0..fbde7fb24 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -86,6 +86,12 @@ test_clustering_membership() {
   # Rename a node using the pre-existing name.
   LXD_DIR="${LXD_THREE_DIR}" lxc cluster rename node4 node5
 
+  # Trying to delete a node which is the only one with a copy of
+  # an image results in an error
+  LXD_DIR="${LXD_FOUR_DIR}" ensure_import_testimage
+  ! LXD_DIR="${LXD_FOUR_DIR}" lxc cluster delete node5
+  LXD_DIR="${LXD_TWO_DIR}" lxc image delete testimage
+
   # Remove a node gracefully.
   LXD_DIR="${LXD_FOUR_DIR}" lxc cluster delete node5
   LXD_DIR="${LXD_FOUR_DIR}" lxc cluster list | grep -q "https://0.0.0.0"
@@ -150,6 +156,9 @@ test_clustering_containers() {
   LXD_DIR="${LXD_TWO_DIR}" lxc info foo | grep -q "Status: Running"
   LXD_DIR="${LXD_ONE_DIR}" lxc list | grep foo | grep -q RUNNING
 
+  # Trying to delete a node which has a container results in an error
+  ! LXD_DIR="${LXD_ONE_DIR}" lxc cluster delete node2
+
   # Exec a command in the container via node1
   LXD_DIR="${LXD_ONE_DIR}" lxc exec foo ls / | grep -q linuxrc
 

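The removal flow is now three steps: check-and-leave, best-effort cleanup of
the node's networks and storage pools over the client API, then a database
purge. A sketch of the cleanup step, with a small interface standing in for
the subset of the LXD client that is used (DeleteNetwork and
DeleteStoragePool are the real client methods, the rest is illustrative):

package main

import "fmt"

// nodeCleaner is the slice of the client API needed for the cleanup step.
type nodeCleaner interface {
	DeleteNetwork(name string) error
	DeleteStoragePool(name string) error
}

// cleanupNode deletes the given networks and pools on a leaving node; it is
// only attempted on graceful (non-forced) removal.
func cleanupNode(client nodeCleaner, networks, pools []string) error {
	for _, name := range networks {
		if err := client.DeleteNetwork(name); err != nil {
			return fmt.Errorf("delete network %q: %v", name, err)
		}
	}
	for _, name := range pools {
		if err := client.DeleteStoragePool(name); err != nil {
			return fmt.Errorf("delete pool %q: %v", name, err)
		}
	}
	return nil
}

type fakeClient struct{}

func (fakeClient) DeleteNetwork(name string) error     { return nil }
func (fakeClient) DeleteStoragePool(name string) error { return nil }

func main() {
	err := cleanupNode(fakeClient{}, []string{"lxdbr0"}, []string{"default"})
	fmt.Println("cleanup error:", err) // <nil>
}
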
From b287fa7e44a53142423e87ba00002fd90169ad30 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 19 Jan 2018 07:28:04 +0000
Subject: [PATCH 188/227] Make lxc cluster return an error if the
 sub-command is unknown

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxc/cluster.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/lxc/cluster.go b/lxc/cluster.go
index e21c4f1e6..24109b42a 100644
--- a/lxc/cluster.go
+++ b/lxc/cluster.go
@@ -59,9 +59,9 @@ func (c *clusterCmd) run(conf *config.Config, args []string) error {
 		return c.doClusterNodeRename(conf, args)
 	case "delete":
 		return c.doClusterNodeDelete(conf, args)
+	default:
+		return errArgs
 	}
-
-	return nil
 }
 
 func (c *clusterCmd) doClusterNodeShow(conf *config.Config, args []string) error {

From 9cd26a447751748d75c42d73ef9df9071d551e91 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 19 Jan 2018 07:38:34 +0000
Subject: [PATCH 189/227] Skip checks about deleting a pool for internal
 cluster requests

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/storage_pools.go | 49 ++++++++++++++++++++++++++++++-------------------
 1 file changed, 30 insertions(+), 19 deletions(-)

diff --git a/lxd/storage_pools.go b/lxd/storage_pools.go
index 4611ae648..63ffca8da 100644
--- a/lxd/storage_pools.go
+++ b/lxd/storage_pools.go
@@ -393,25 +393,13 @@ func storagePoolDelete(d *Daemon, r *http.Request) Response {
 		return NotFound
 	}
 
-	// Check if the storage pool has any volumes associated with it, if so
-	// error out.
-	volumeCount, err := d.cluster.StoragePoolVolumesGetNames(poolID)
-	if err != nil {
-		return InternalError(err)
-	}
-
-	if volumeCount > 0 {
-		return BadRequest(fmt.Errorf("storage pool \"%s\" has volumes attached to it", poolName))
-	}
-
-	// Check if the storage pool is still referenced in any profiles.
-	profiles, err := profilesUsingPoolGetNames(d.cluster, poolName)
-	if err != nil {
-		return SmartError(err)
-	}
-
-	if len(profiles) > 0 && !isClusterNotification(r) {
-		return BadRequest(fmt.Errorf("Storage pool \"%s\" has profiles using it:\n%s", poolName, strings.Join(profiles, "\n")))
+	// If this is not an internal cluster request, check that the storage
+	// pool has no volumes attached to it and no profiles using it.
+	if !isClusterNotification(r) {
+		response := storagePoolDeleteCheckPreconditions(d.cluster, poolName, poolID)
+		if response != nil {
+			return response
+		}
 	}
 
 	s, err := storagePoolInit(d.State(), poolName)
@@ -460,4 +448,27 @@ func storagePoolDelete(d *Daemon, r *http.Request) Response {
 	return EmptySyncResponse
 }
 
+func storagePoolDeleteCheckPreconditions(cluster *db.Cluster, poolName string, poolID int64) Response {
+	volumeCount, err := cluster.StoragePoolVolumesGetNames(poolID)
+	if err != nil {
+		return InternalError(err)
+	}
+
+	if volumeCount > 0 {
+		return BadRequest(fmt.Errorf("storage pool \"%s\" has volumes attached to it", poolName))
+	}
+
+	// Check if the storage pool is still referenced in any profiles.
+	profiles, err := profilesUsingPoolGetNames(cluster, poolName)
+	if err != nil {
+		return SmartError(err)
+	}
+
+	if len(profiles) > 0 {
+		return BadRequest(fmt.Errorf("Storage pool \"%s\" has profiles using it:\n%s", poolName, strings.Join(profiles, "\n")))
+	}
+
+	return nil
+}
+
 var storagePoolCmd = Command{name: "storage-pools/{name}", get: storagePoolGet, put: storagePoolPut, patch: storagePoolPatch, delete: storagePoolDelete}

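The rationale is that the initiating node has already run the user-facing
precondition checks, so internal fan-out requests can skip them. A sketch of
that guard; the User-Agent value below is an assumption for illustration,
the real check is the isClusterNotification helper:

package main

import "fmt"

// isClusterNotification stands in for the real check on the request's
// User-Agent, which marks node-to-node fan-out requests.
func isClusterNotification(userAgent string) bool {
	return userAgent == "lxd-cluster-notifier" // assumed value
}

func deletePool(userAgent, pool string, preconditions func(string) error) error {
	// Internal notifications skip the user-facing precondition checks,
	// since the initiating node performed them already.
	if !isClusterNotification(userAgent) {
		if err := preconditions(pool); err != nil {
			return err
		}
	}
	fmt.Println("deleting", pool)
	return nil
}

func main() {
	checks := func(pool string) error {
		return fmt.Errorf("storage pool %q has volumes attached to it", pool)
	}
	fmt.Println(deletePool("lxc-client", "default", checks))           // blocked
	fmt.Println(deletePool("lxd-cluster-notifier", "default", checks)) // allowed
}
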
From 250a2e850cd6746d61d6624324ddc25ecfe15d63 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 19 Jan 2018 08:08:58 +0000
Subject: [PATCH 190/227] Better error when creating a pool/network not pending
 on any node

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_networks_test.go      | 19 +++++++++++++++++++
 lxd/api_storage_pools_test.go | 26 ++++++++++++++++++++------
 lxd/networks.go               |  3 +++
 lxd/storage_pools.go          |  3 +++
 4 files changed, 45 insertions(+), 6 deletions(-)

diff --git a/lxd/api_networks_test.go b/lxd/api_networks_test.go
index 90e71bb15..1ad93500f 100644
--- a/lxd/api_networks_test.go
+++ b/lxd/api_networks_test.go
@@ -36,6 +36,25 @@ func TestNetworksCreate_TargetNode(t *testing.T) {
 	assert.Equal(t, []string{"rusp-0"}, network.Nodes)
 }
 
+// An error is returned when trying to create a new network in a cluster
+// where the network was not defined on any node.
+func TestNetworksCreate_NotDefined(t *testing.T) {
+	daemons, cleanup := newDaemons(t, 2)
+	defer cleanup()
+
+	f := clusterFixture{t: t}
+	f.FormCluster(daemons)
+
+	// Trying to create the network now results in an error, since it's not
+	// defined on any node.
+	networkPost := api.NetworksPost{
+		Name: "mynetwork",
+	}
+	client := f.ClientUnix(daemons[0])
+	err := client.CreateNetwork(networkPost)
+	require.EqualError(t, err, "Network not pending on any node (use --target <node> first)")
+}
+
 // An error is returned when trying to create a new network in a cluster where
 // the network was not defined on all nodes.
 func TestNetworksCreate_MissingNodes(t *testing.T) {
diff --git a/lxd/api_storage_pools_test.go b/lxd/api_storage_pools_test.go
index c4cf3adbd..a268a1a30 100644
--- a/lxd/api_storage_pools_test.go
+++ b/lxd/api_storage_pools_test.go
@@ -10,9 +10,6 @@ import (
 
 // Create a new pending storage pool using the targetNode query parameter.
 func TestStoragePoolsCreate_TargetNode(t *testing.T) {
-	if testing.Short() {
-		t.Skip("skipping storage-pools targetNode test in short mode.")
-	}
 	daemons, cleanup := newDaemons(t, 2)
 	defer cleanup()
 
@@ -39,12 +36,29 @@ func TestStoragePoolsCreate_TargetNode(t *testing.T) {
 	assert.Equal(t, "PENDING", pool.State)
 }
 
+// An error is returned when trying to create a new storage pool in a cluster
+// where the pool was not defined on any node.
+func TestStoragePoolsCreate_NotDefined(t *testing.T) {
+	daemons, cleanup := newDaemons(t, 2)
+	defer cleanup()
+
+	f := clusterFixture{t: t}
+	f.FormCluster(daemons)
+
+	// Trying to create the pool now results in an error, since it's not
+	// defined on any node.
+	poolPost := api.StoragePoolsPost{
+		Name:   "mypool",
+		Driver: "dir",
+	}
+	client := f.ClientUnix(daemons[0])
+	err := client.CreateStoragePool(poolPost)
+	require.EqualError(t, err, "Pool not pending on any node (use --target <node> first)")
+}
+
 // An error is returned when trying to create a new storage pool in a cluster
 // where the pool was not defined on all nodes.
 func TestStoragePoolsCreate_MissingNodes(t *testing.T) {
-	if testing.Short() {
-		t.Skip("skipping storage-pools targetNode test in short mode.")
-	}
 	daemons, cleanup := newDaemons(t, 2)
 	defer cleanup()
 
diff --git a/lxd/networks.go b/lxd/networks.go
index 1195a8dfa..181f89bab 100644
--- a/lxd/networks.go
+++ b/lxd/networks.go
@@ -217,6 +217,9 @@ func networksPostCluster(d *Daemon, req api.NetworksPost) error {
 		return tx.NetworkConfigAdd(networkID, 0, req.Config)
 	})
 	if err != nil {
+		if err == db.NoSuchObjectError {
+			return fmt.Errorf("Network not pending on any node (use --target <node> first)")
+		}
 		return err
 	}
 
diff --git a/lxd/storage_pools.go b/lxd/storage_pools.go
index 63ffca8da..58bb9cf16 100644
--- a/lxd/storage_pools.go
+++ b/lxd/storage_pools.go
@@ -188,6 +188,9 @@ func storagePoolsPostCluster(d *Daemon, req api.StoragePoolsPost) error {
 		return tx.StoragePoolConfigAdd(poolID, 0, req.Config)
 	})
 	if err != nil {
+		if err == db.NoSuchObjectError {
+			return fmt.Errorf("Pool not pending on any node (use --target <node> first)")
+		}
 		return err
 	}
 

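Both hunks follow the same idiom: translate the generic db.NoSuchObjectError
sentinel, which surfaces when no pending row exists yet, into an actionable
message. Sketched with a stand-in sentinel:

package main

import (
	"errors"
	"fmt"
)

// noSuchObjectError mimics the db.NoSuchObjectError sentinel.
var noSuchObjectError = errors.New("no such object")

// createPending stands in for the transaction that fails with the sentinel
// when the pool was never defined on any node.
func createPending(defined bool) error {
	if !defined {
		return noSuchObjectError
	}
	return nil
}

func create(defined bool) error {
	err := createPending(defined)
	if err != nil {
		// Translate the sentinel into an actionable message.
		if err == noSuchObjectError {
			return fmt.Errorf("Pool not pending on any node (use --target <node> first)")
		}
		return err
	}
	return nil
}

func main() {
	fmt.Println(create(false)) // Pool not pending on any node (use --target <node> first)
	fmt.Println(create(true))  // <nil>
}
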
From 94a8249462bb50a58362ea2909d082e321c73d0f Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 19 Jan 2018 08:54:43 +0000
Subject: [PATCH 191/227] Handle deleting networks and pools in the PENDING
 state

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_networks_test.go      |  8 ++++++++
 lxd/api_storage_pools_test.go |  8 ++++++++
 lxd/networks.go               | 15 +++++++++++++++
 lxd/storage_pools.go          | 14 ++++++++++++++
 4 files changed, 45 insertions(+)

diff --git a/lxd/api_networks_test.go b/lxd/api_networks_test.go
index 1ad93500f..980e9eca1 100644
--- a/lxd/api_networks_test.go
+++ b/lxd/api_networks_test.go
@@ -34,6 +34,14 @@ func TestNetworksCreate_TargetNode(t *testing.T) {
 
 	assert.Equal(t, "PENDING", network.State)
 	assert.Equal(t, []string{"rusp-0"}, network.Nodes)
+
+	// If a network is pending, deleting it just means removing the
+	// relevant rows from the database.
+	err = client.DeleteNetwork("mynetwork")
+	require.NoError(t, err)
+
+	_, _, err = client.GetNetwork("mynetwork")
+	require.EqualError(t, err, "not found")
 }
 
 // An error is returned when trying to create a new network in a cluster
diff --git a/lxd/api_storage_pools_test.go b/lxd/api_storage_pools_test.go
index a268a1a30..ede3d8fdc 100644
--- a/lxd/api_storage_pools_test.go
+++ b/lxd/api_storage_pools_test.go
@@ -34,6 +34,14 @@ func TestStoragePoolsCreate_TargetNode(t *testing.T) {
 	require.NoError(t, err)
 
 	assert.Equal(t, "PENDING", pool.State)
+
+	// If a storage pool is pending, deleting it just means removing the
+	// relevant rows from the database.
+	err = client.DeleteStoragePool("mypool")
+	require.NoError(t, err)
+
+	_, _, err = client.GetStoragePool("mypool")
+	require.EqualError(t, err, "not found")
 }
 
 // An error is returned when trying to create a new storage pool in a cluster
diff --git a/lxd/networks.go b/lxd/networks.go
index 181f89bab..3db245e25 100644
--- a/lxd/networks.go
+++ b/lxd/networks.go
@@ -406,11 +406,26 @@ func networkDelete(d *Daemon, r *http.Request) Response {
 	name := mux.Vars(r)["name"]
 	state := d.State()
 
+	// Check if the network is pending; if so, we just need to delete it
+	// from the database.
+	_, network, err := d.cluster.NetworkGet(name)
+	if err != nil {
+		return SmartError(err)
+	}
+	if network.State == "PENDING" {
+		err := d.cluster.NetworkDelete(name)
+		if err != nil {
+			return SmartError(err)
+		}
+		return EmptySyncResponse
+	}
+
 	// Get the existing network
 	n, err := networkLoadByName(state, name)
 	if err != nil {
 		return NotFound
 	}
+
 	if isClusterNotification(r) {
 		n.state = nil // We just want to delete the network from the system
 	} else {
diff --git a/lxd/storage_pools.go b/lxd/storage_pools.go
index 58bb9cf16..5987bbf9b 100644
--- a/lxd/storage_pools.go
+++ b/lxd/storage_pools.go
@@ -405,6 +405,20 @@ func storagePoolDelete(d *Daemon, r *http.Request) Response {
 		}
 	}
 
+	// Check if the pool is pending; if so, we just need to delete it
+	// from the database.
+	_, pool, err := d.cluster.StoragePoolGet(poolName)
+	if err != nil {
+		return SmartError(err)
+	}
+	if pool.State == "PENDING" {
+		_, err := d.cluster.StoragePoolDelete(poolName)
+		if err != nil {
+			return SmartError(err)
+		}
+		return EmptySyncResponse
+	}
+
 	s, err := storagePoolInit(d.State(), poolName)
 	if err != nil {
 		return InternalError(err)

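Deleting a PENDING object never reaches driver code, since nothing has been
created on disk yet, so removal is purely a database operation. The shape of
the guard, sketched:

package main

import "fmt"

type pool struct {
	name  string
	state string // "PENDING" or "CREATED"
}

// deletePool short-circuits for pending pools: only the database rows need
// to go away, the storage driver is never involved.
func deletePool(p *pool, dbDelete, driverDelete func(name string) error) error {
	if p.state == "PENDING" {
		return dbDelete(p.name)
	}
	if err := driverDelete(p.name); err != nil {
		return err
	}
	return dbDelete(p.name)
}

func main() {
	p := &pool{name: "mypool", state: "PENDING"}
	err := deletePool(p,
		func(name string) error { fmt.Println("db delete:", name); return nil },
		func(name string) error { fmt.Println("driver delete:", name); return nil },
	)
	fmt.Println(err) // only "db delete: mypool" was printed
}
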
From 90c91fe8d6cc453719ea4b110156a93ed72a9609 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 19 Jan 2018 09:05:44 +0000
Subject: [PATCH 192/227] Extract storage pool validation into a standalone
 function

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/storage_pools_utils.go | 28 ++++++++++++++++++----------
 1 file changed, 18 insertions(+), 10 deletions(-)

diff --git a/lxd/storage_pools_utils.go b/lxd/storage_pools_utils.go
index 7e5a80492..4f483b970 100644
--- a/lxd/storage_pools_utils.go
+++ b/lxd/storage_pools_utils.go
@@ -157,14 +157,8 @@ func profilesUsingPoolGetNames(db *db.Cluster, poolName string) ([]string, error
 }
 
 func storagePoolDBCreate(s *state.State, poolName, poolDescription string, driver string, config map[string]string) error {
-	// Check if the storage pool name is valid.
-	err := storageValidName(poolName)
-	if err != nil {
-		return err
-	}
-
 	// Check that the storage pool does not already exist.
-	_, err = s.Cluster.StoragePoolGetID(poolName)
+	_, err := s.Cluster.StoragePoolGetID(poolName)
 	if err == nil {
 		return fmt.Errorf("The storage pool already exists")
 	}
@@ -173,9 +167,7 @@ func storagePoolDBCreate(s *state.State, poolName, poolDescription string, drive
 	if config == nil {
 		config = map[string]string{}
 	}
-
-	// Validate the requested storage pool configuration.
-	err = storagePoolValidateConfig(poolName, driver, config, nil)
+	err = storagePoolValidate(poolName, driver, config)
 	if err != nil {
 		return err
 	}
@@ -195,6 +187,22 @@ func storagePoolDBCreate(s *state.State, poolName, poolDescription string, drive
 	return nil
 }
 
+func storagePoolValidate(poolName string, driver string, config map[string]string) error {
+	// Check if the storage pool name is valid.
+	err := storageValidName(poolName)
+	if err != nil {
+		return err
+	}
+
+	// Validate the requested storage pool configuration.
+	err = storagePoolValidateConfig(poolName, driver, config, nil)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
 func storagePoolCreateInternal(state *state.State, poolName, poolDescription string, driver string, config map[string]string) error {
 	err := storagePoolDBCreate(state, poolName, poolDescription, driver, config)
 	if err != nil {

From 8075426c99975d97b9d9981ec79c9017ad80a44c Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 19 Jan 2018 09:19:04 +0000
Subject: [PATCH 193/227] Perform pool parameter validation for
 clustering-related requests

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/storage_pools.go | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/lxd/storage_pools.go b/lxd/storage_pools.go
index 5987bbf9b..fd0e003d1 100644
--- a/lxd/storage_pools.go
+++ b/lxd/storage_pools.go
@@ -98,6 +98,10 @@ func storagePoolsPost(d *Daemon, r *http.Request) Response {
 		// This is an internal request which triggers the actual
 		// creation of the pool across all nodes, after they have been
 		// previously defined.
+		err = storagePoolValidate(req.Name, req.Driver, req.Config)
+		if err != nil {
+			return BadRequest(err)
+		}
 		err = doStoragePoolCreateInternal(
 			d.State(), req.Name, req.Description, req.Driver, req.Config)
 		if err != nil {
@@ -139,6 +143,12 @@ func storagePoolsPost(d *Daemon, r *http.Request) Response {
 			return SmartError(fmt.Errorf("Invalid config key '%s'", key))
 		}
 	}
+
+	err = storagePoolValidate(req.Name, req.Driver, req.Config)
+	if err != nil {
+		return BadRequest(err)
+	}
+
 	err = d.cluster.Transaction(func(tx *db.ClusterTx) error {
 		return tx.StoragePoolCreatePending(targetNode, req.Name, req.Driver, req.Config)
 	})
@@ -199,6 +209,10 @@ func storagePoolsPostCluster(d *Daemon, req api.StoragePoolsPost) error {
 	for key, value := range configs[nodeName] {
 		nodeReq.Config[key] = value
 	}
+	err = storagePoolValidate(req.Name, req.Driver, req.Config)
+	if err != nil {
+		return err
+	}
 	err = doStoragePoolCreateInternal(
 		d.State(), req.Name, req.Description, req.Driver, req.Config)
 	if err != nil {

From 3cab30f7179c254bc18388df40e7492377cfbec4 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 19 Jan 2018 09:19:35 +0000
Subject: [PATCH 194/227] Disable clustering integration tests on CI

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 test/main.sh | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/test/main.sh b/test/main.sh
index de9f89284..73fc36110 100755
--- a/test/main.sh
+++ b/test/main.sh
@@ -194,11 +194,15 @@ run_test test_kernel_limits "kernel limits"
 run_test test_macaroon_auth "macaroon authentication"
 run_test test_console "console"
 run_test test_proxy_device "proxy device"
-run_test test_clustering_membership "clustering membership"
-run_test test_clustering_containers "clustering containers"
-run_test test_clustering_storage "clustering storage"
-run_test test_clustering_network "clustering network"
-run_test test_clustering_upgrade "clustering upgrade"
+
+# FIXME: clustering tests are currently not working on CI
+if ! [ -e "/lxc-ci/build/cache/sqlite" ]; then
+    run_test test_clustering_membership "clustering membership"
+    run_test test_clustering_containers "clustering containers"
+    run_test test_clustering_storage "clustering storage"
+    run_test test_clustering_network "clustering network"
+    run_test test_clustering_upgrade "clustering upgrade"
+fi
 
 # shellcheck disable=SC2034
 TEST_RESULT=success

From f9cec5619cae25fbdd6062ff633402ca36db22cd Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 19 Jan 2018 11:34:06 +0000
Subject: [PATCH 195/227] Add "size" to the list of node-specific storage pool
 config keys

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/migration_test.go | 9 ++++++---
 lxd/db/storage_pools.go  | 1 +
 2 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/lxd/db/migration_test.go b/lxd/db/migration_test.go
index 561a76dbb..b5a340088 100644
--- a/lxd/db/migration_test.go
+++ b/lxd/db/migration_test.go
@@ -85,6 +85,7 @@ func TestImportPreClusteringData(t *testing.T) {
 	assert.Equal(t, "/foo/bar", pool.Config["source"])
 	assert.Equal(t, "123", pool.Config["size"])
 	assert.Equal(t, "/foo/bar", pool.Config["volatile.initial_source"])
+	assert.Equal(t, "mypool", pool.Config["zfs.pool_name"])
 	assert.Equal(t, "CREATED", pool.State)
 	assert.Equal(t, []string{"none"}, pool.Nodes)
 	volumes, err := cluster.StoragePoolVolumesGet(id, []int{1})
@@ -93,18 +94,19 @@ func TestImportPreClusteringData(t *testing.T) {
 	assert.Equal(t, "/foo/bar", volumes[0].Config["source"])
 
 	err = cluster.Transaction(func(tx *db.ClusterTx) error {
-		// The size config got a NULL node_id, since it's cluster global.
+		// The zfs.pool_name config got a NULL node_id, since it's cluster global.
 		config, err := query.SelectConfig(tx.Tx(), "storage_pools_config", "node_id IS NULL")
 		require.NoError(t, err)
-		assert.Equal(t, map[string]string{"size": "123"}, config)
+		assert.Equal(t, map[string]string{"zfs.pool_name": "mypool"}, config)
 
 		// The other config keys are node-specific.
 		config, err = query.SelectConfig(tx.Tx(), "storage_pools_config", "node_id=?", 1)
 		require.NoError(t, err)
 		assert.Equal(t,
 			map[string]string{
+				"source": "/foo/bar",
+				"size":   "123",
 				"volatile.initial_source": "/foo/bar",
-				"source":                  "/foo/bar",
 			}, config)
 
 		return nil
@@ -170,6 +172,7 @@ func newPreClusteringTx(t *testing.T) *sql.Tx {
 		"INSERT INTO storage_pools_config VALUES(1, 1, 'source', '/foo/bar')",
 		"INSERT INTO storage_pools_config VALUES(2, 1, 'size', '123')",
 		"INSERT INTO storage_pools_config VALUES(3, 1, 'volatile.initial_source', '/foo/bar')",
+		"INSERT INTO storage_pools_config VALUES(4, 1, 'zfs.pool_name', 'mypool')",
 		"INSERT INTO storage_volumes VALUES (1, 'dev', 1, 1, '')",
 		"INSERT INTO storage_volumes_config VALUES(1, 1, 'source', '/foo/bar')",
 	}
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index a4a5754d9..dbda14dd8 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -792,6 +792,7 @@ const (
 // StoragePoolNodeConfigKeys lists all storage pool config keys which are
 // node-specific.
 var StoragePoolNodeConfigKeys = []string{
+	"size",
 	"source",
 	"volatile.initial_source",
 }

From 2b9f328a14076a1558f7437d9ddd236608f1180c Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 19 Jan 2018 11:49:20 +0000
Subject: [PATCH 196/227] Make lxd sql also work with lower-case "select"

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_internal.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lxd/api_internal.go b/lxd/api_internal.go
index 0c2ca82d6..3f644db96 100644
--- a/lxd/api_internal.go
+++ b/lxd/api_internal.go
@@ -111,7 +111,7 @@ func internalSQL(d *Daemon, r *http.Request) Response {
 	}
 	db := d.cluster.DB()
 	result := internalSQLResult{}
-	if strings.HasPrefix(req.Query, "SELECT") {
+	if strings.HasPrefix(strings.ToUpper(req.Query), "SELECT") {
 		rows, err := db.Query(req.Query)
 		if err != nil {
 			return SmartError(err)

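The fix just normalises case before the prefix check, so lower-case selects
now go through Query. Note it remains a prefix check: a statement with
leading whitespace or a comment before SELECT still falls through to Exec.
A tiny sketch:

package main

import (
	"fmt"
	"strings"
)

// isSelect reports whether a statement should go through Query rather than
// Exec, matching the check above (prefix only, case-insensitive).
func isSelect(query string) bool {
	return strings.HasPrefix(strings.ToUpper(query), "SELECT")
}

func main() {
	fmt.Println(isSelect("select * from nodes"))    // true
	fmt.Println(isSelect("SELECT 1"))               // true
	fmt.Println(isSelect("DELETE FROM raft_nodes")) // false
}
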
From 74497142a2e74c8b835a184078f9adef0e9de225 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 19 Jan 2018 13:02:17 +0000
Subject: [PATCH 197/227] Move node_id column from storage_volumes_config to
 storage_volumes

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/cluster/schema.go      | 12 ++++++------
 lxd/db/cluster/update.go      | 12 ++++++------
 lxd/db/cluster/update_test.go |  6 ------
 lxd/db/containers.go          |  4 ++--
 lxd/db/images.go              |  4 ++--
 lxd/db/migration.go           |  8 ++++----
 lxd/db/migration_test.go      |  6 ++++++
 lxd/db/storage_pools.go       | 27 ++++++++++++++-------------
 lxd/db/storage_volumes.go     | 14 +++++++-------
 9 files changed, 47 insertions(+), 46 deletions(-)

diff --git a/lxd/db/cluster/schema.go b/lxd/db/cluster/schema.go
index 2609dce8c..29bc77316 100644
--- a/lxd/db/cluster/schema.go
+++ b/lxd/db/cluster/schema.go
@@ -218,20 +218,20 @@ CREATE TABLE storage_volumes (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
     name TEXT NOT NULL,
     storage_pool_id INTEGER NOT NULL,
+    node_id INTEGER NOT NULL,
     type INTEGER NOT NULL,
     description TEXT,
-    UNIQUE (storage_pool_id, name, type),
-    FOREIGN KEY (storage_pool_id) REFERENCES storage_pools (id) ON DELETE CASCADE
+    UNIQUE (storage_pool_id, node_id, name, type),
+    FOREIGN KEY (storage_pool_id) REFERENCES storage_pools (id) ON DELETE CASCADE,
+    FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
 );
 CREATE TABLE storage_volumes_config (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
     storage_volume_id INTEGER NOT NULL,
-    node_id INTEGER NOT NULL,
     key TEXT NOT NULL,
     value TEXT,
-    UNIQUE (storage_volume_id, node_id, key),
-    FOREIGN KEY (storage_volume_id) REFERENCES storage_volumes (id) ON DELETE CASCADE,
-    FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
+    UNIQUE (storage_volume_id, key),
+    FOREIGN KEY (storage_volume_id) REFERENCES storage_volumes (id) ON DELETE CASCADE
 );
 
 INSERT INTO schema (version, updated_at) VALUES (5, strftime("%s"))
diff --git a/lxd/db/cluster/update.go b/lxd/db/cluster/update.go
index 943f457b4..264960cc6 100644
--- a/lxd/db/cluster/update.go
+++ b/lxd/db/cluster/update.go
@@ -254,20 +254,20 @@ CREATE TABLE storage_volumes (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
     name TEXT NOT NULL,
     storage_pool_id INTEGER NOT NULL,
+    node_id INTEGER NOT NULL,
     type INTEGER NOT NULL,
     description TEXT,
-    UNIQUE (storage_pool_id, name, type),
-    FOREIGN KEY (storage_pool_id) REFERENCES storage_pools (id) ON DELETE CASCADE
+    UNIQUE (storage_pool_id, node_id, name, type),
+    FOREIGN KEY (storage_pool_id) REFERENCES storage_pools (id) ON DELETE CASCADE,
+    FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
 );
 CREATE TABLE storage_volumes_config (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
     storage_volume_id INTEGER NOT NULL,
-    node_id INTEGER NOT NULL,
     key TEXT NOT NULL,
     value TEXT,
-    UNIQUE (storage_volume_id, node_id, key),
-    FOREIGN KEY (storage_volume_id) REFERENCES storage_volumes (id) ON DELETE CASCADE,
-    FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
+    UNIQUE (storage_volume_id, key),
+    FOREIGN KEY (storage_volume_id) REFERENCES storage_volumes (id) ON DELETE CASCADE
 );
 `
 	_, err := tx.Exec(stmt)
diff --git a/lxd/db/cluster/update_test.go b/lxd/db/cluster/update_test.go
index c015e85a6..6f7fca069 100644
--- a/lxd/db/cluster/update_test.go
+++ b/lxd/db/cluster/update_test.go
@@ -114,12 +114,6 @@ func TestUpdateFromV1_ConfigTables(t *testing.T) {
 		_, err := db.Exec("INSERT INTO storage_pools VALUES (1, 'default', 'dir', '')")
 		require.NoError(t, err)
 	})
-	testConfigTable(t, "storage_volumes", func(db *sql.DB) {
-		_, err := db.Exec("INSERT INTO storage_pools VALUES (1, 'default', 'dir', '')")
-		require.NoError(t, err)
-		_, err = db.Exec("INSERT INTO storage_volumes VALUES (1, 'dev', 1, 1, '')")
-		require.NoError(t, err)
-	})
 }
 
 func testConfigTable(t *testing.T, table string, setup func(db *sql.DB)) {
diff --git a/lxd/db/containers.go b/lxd/db/containers.go
index 200791101..9e3519ce0 100644
--- a/lxd/db/containers.go
+++ b/lxd/db/containers.go
@@ -655,8 +655,8 @@ func (c *Cluster) ContainerPool(containerName string) (string, error) {
 	poolName := ""
 	query := `SELECT storage_pools.name FROM storage_pools
 JOIN storage_volumes ON storage_pools.id=storage_volumes.storage_pool_id
-WHERE storage_volumes.name=? AND storage_volumes.type=?`
-	inargs := []interface{}{containerName, StoragePoolVolumeTypeContainer}
+WHERE storage_volumes.node_id=? AND storage_volumes.name=? AND storage_volumes.type=?`
+	inargs := []interface{}{c.nodeID, containerName, StoragePoolVolumeTypeContainer}
 	outargs := []interface{}{&poolName}
 
 	err := dbQueryRowScan(c.db, query, inargs, outargs)
diff --git a/lxd/db/images.go b/lxd/db/images.go
index c9138afb9..65dc1978b 100644
--- a/lxd/db/images.go
+++ b/lxd/db/images.go
@@ -612,8 +612,8 @@ func (c *Cluster) ImageInsert(fp string, fname string, sz int64, public bool, au
 // Get the names of all storage pools on which a given image exists.
 func (c *Cluster) ImageGetPools(imageFingerprint string) ([]int64, error) {
 	poolID := int64(-1)
-	query := "SELECT storage_pool_id FROM storage_volumes WHERE name=? AND type=?"
-	inargs := []interface{}{imageFingerprint, StoragePoolVolumeTypeImage}
+	query := "SELECT storage_pool_id FROM storage_volumes WHERE node_id=? AND name=? AND type=?"
+	inargs := []interface{}{c.nodeID, imageFingerprint, StoragePoolVolumeTypeImage}
 	outargs := []interface{}{poolID}
 
 	result, err := queryScan(c.db, query, inargs, outargs)
diff --git a/lxd/db/migration.go b/lxd/db/migration.go
index f78821aea..6f46e037d 100644
--- a/lxd/db/migration.go
+++ b/lxd/db/migration.go
@@ -88,7 +88,7 @@ func (c *Cluster) ImportPreClusteringData(dump *Dump) error {
 			}
 			columns := dump.Schema[table]
 
-			nullNodeID := false // Whether config-related rows should have a NULL node ID
+			nullNodeID := false // Whether node-related rows should have a NULL node ID
 			appendNodeID := func() {
 				columns = append(columns, "node_id")
 				if nullNodeID {
@@ -113,7 +113,7 @@ func (c *Cluster) ImportPreClusteringData(dump *Dump) error {
 					continue
 				}
 			case "containers":
-				fallthrough
+				appendNodeID()
 			case "networks_config":
 				// The "bridge.external_interfaces" config key
 				// is the only one which is not global to the
@@ -144,13 +144,13 @@ func (c *Cluster) ImportPreClusteringData(dump *Dump) error {
 					break
 				}
 				appendNodeID()
-			case "storage_volumes_config":
-				appendNodeID()
 			case "networks":
 				fallthrough
 			case "storage_pools":
 				columns = append(columns, "state")
 				row = append(row, storagePoolCreated)
+			case "storage_volumes":
+				appendNodeID()
 			}
 			stmt := fmt.Sprintf("INSERT INTO %s(%s)", table, strings.Join(columns, ", "))
 			stmt += fmt.Sprintf(" VALUES %s", query.Params(len(columns)))
diff --git a/lxd/db/migration_test.go b/lxd/db/migration_test.go
index b5a340088..4f174e06b 100644
--- a/lxd/db/migration_test.go
+++ b/lxd/db/migration_test.go
@@ -109,6 +109,12 @@ func TestImportPreClusteringData(t *testing.T) {
 				"volatile.initial_source": "/foo/bar",
 			}, config)
 
+		// Storage volumes now have a node_id column set to 1 (the ID
+		// of the default node).
+		ids, err := query.SelectIntegers(tx.Tx(), "SELECT node_id FROM storage_volumes")
+		require.NoError(t, err)
+		assert.Equal(t, []int{1}, ids)
+
 		return nil
 	})
 	require.NoError(t, err)
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index dbda14dd8..a9c7d2052 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -6,6 +6,7 @@ import (
 	"strings"
 
 	_ "github.com/mattn/go-sqlite3"
+	"github.com/pkg/errors"
 
 	"github.com/lxc/lxd/lxd/db/query"
 	"github.com/lxc/lxd/shared"
@@ -553,8 +554,8 @@ func (c *Cluster) StoragePoolDelete(poolName string) (*api.StoragePool, error) {
 // Get the names of all storage volumes attached to a given storage pool.
 func (c *Cluster) StoragePoolVolumesGetNames(poolID int64) (int, error) {
 	var volumeName string
-	query := "SELECT name FROM storage_volumes WHERE storage_pool_id=?"
-	inargs := []interface{}{poolID}
+	query := "SELECT name FROM storage_volumes WHERE storage_pool_id=? AND node_id=?"
+	inargs := []interface{}{poolID, c.nodeID}
 	outargs := []interface{}{volumeName}
 
 	result, err := queryScan(c.db, query, inargs, outargs)
@@ -577,12 +578,12 @@ func (c *Cluster) StoragePoolVolumesGet(poolID int64, volumeTypes []int) ([]*api
 	for _, volumeType := range volumeTypes {
 		volumeNames, err := c.StoragePoolVolumesGetType(volumeType, poolID)
 		if err != nil && err != sql.ErrNoRows {
-			return nil, err
+			return nil, errors.Wrap(err, "failed to fetch volume types")
 		}
 		for _, volumeName := range volumeNames {
 			_, volume, err := c.StoragePoolVolumeGetType(volumeName, volumeType, poolID)
 			if err != nil {
-				return nil, err
+				return nil, errors.Wrap(err, "failed to fetch volume type")
 			}
 			result = append(result, volume)
 		}
@@ -599,8 +600,8 @@ func (c *Cluster) StoragePoolVolumesGet(poolID int64, volumeTypes []int) ([]*api
 // type.
 func (c *Cluster) StoragePoolVolumesGetType(volumeType int, poolID int64) ([]string, error) {
 	var poolName string
-	query := "SELECT name FROM storage_volumes WHERE storage_pool_id=? AND type=?"
-	inargs := []interface{}{poolID, volumeType}
+	query := "SELECT name FROM storage_volumes WHERE storage_pool_id=? AND node_id=? AND type=?"
+	inargs := []interface{}{poolID, c.nodeID, volumeType}
 	outargs := []interface{}{poolName}
 
 	result, err := queryScan(c.db, query, inargs, outargs)
@@ -660,13 +661,13 @@ func (c *Cluster) StoragePoolVolumeUpdate(volumeName string, volumeType int, poo
 		return err
 	}
 
-	err = StorageVolumeConfigClear(tx, volumeID, c.nodeID)
+	err = StorageVolumeConfigClear(tx, volumeID)
 	if err != nil {
 		tx.Rollback()
 		return err
 	}
 
-	err = StorageVolumeConfigAdd(tx, volumeID, c.nodeID, volumeConfig)
+	err = StorageVolumeConfigAdd(tx, volumeID, volumeConfig)
 	if err != nil {
 		tx.Rollback()
 		return err
@@ -724,8 +725,8 @@ func (c *Cluster) StoragePoolVolumeCreate(volumeName, volumeDescription string,
 		return -1, err
 	}
 
-	result, err := tx.Exec("INSERT INTO storage_volumes (storage_pool_id, type, name, description) VALUES (?, ?, ?, ?)",
-		poolID, volumeType, volumeName, volumeDescription)
+	result, err := tx.Exec("INSERT INTO storage_volumes (storage_pool_id, node_id, type, name, description) VALUES (?, ?, ?, ?, ?)",
+		poolID, c.nodeID, volumeType, volumeName, volumeDescription)
 	if err != nil {
 		tx.Rollback()
 		return -1, err
@@ -737,7 +738,7 @@ func (c *Cluster) StoragePoolVolumeCreate(volumeName, volumeDescription string,
 		return -1, err
 	}
 
-	err = StorageVolumeConfigAdd(tx, volumeID, c.nodeID, volumeConfig)
+	err = StorageVolumeConfigAdd(tx, volumeID, volumeConfig)
 	if err != nil {
 		tx.Rollback()
 		return -1, err
@@ -759,9 +760,9 @@ func (c *Cluster) StoragePoolVolumeGetTypeID(volumeName string, volumeType int,
 FROM storage_volumes
 JOIN storage_pools
 ON storage_volumes.storage_pool_id = storage_pools.id
-WHERE storage_volumes.storage_pool_id=?
+WHERE storage_volumes.storage_pool_id=? AND storage_volumes.node_id=?
 AND storage_volumes.name=? AND storage_volumes.type=?`
-	inargs := []interface{}{poolID, volumeName, volumeType}
+	inargs := []interface{}{poolID, c.nodeID, volumeName, volumeType}
 	outargs := []interface{}{&volumeID}
 
 	err := dbQueryRowScan(c.db, query, inargs, outargs)
diff --git a/lxd/db/storage_volumes.go b/lxd/db/storage_volumes.go
index e40bc11cb..92ad4def3 100644
--- a/lxd/db/storage_volumes.go
+++ b/lxd/db/storage_volumes.go
@@ -11,8 +11,8 @@ import (
 // Get config of a storage volume.
 func (c *Cluster) StorageVolumeConfigGet(volumeID int64) (map[string]string, error) {
 	var key, value string
-	query := "SELECT key, value FROM storage_volumes_config WHERE storage_volume_id=? AND node_id=?"
-	inargs := []interface{}{volumeID, c.nodeID}
+	query := "SELECT key, value FROM storage_volumes_config WHERE storage_volume_id=?"
+	inargs := []interface{}{volumeID}
 	outargs := []interface{}{key, value}
 
 	results, err := queryScan(c.db, query, inargs, outargs)
@@ -56,8 +56,8 @@ func StorageVolumeDescriptionUpdate(tx *sql.Tx, volumeID int64, description stri
 }
 
 // Add new storage volume config into database.
-func StorageVolumeConfigAdd(tx *sql.Tx, volumeID, nodeID int64, volumeConfig map[string]string) error {
-	str := "INSERT INTO storage_volumes_config (storage_volume_id, node_id, key, value) VALUES(?, ?, ?, ?)"
+func StorageVolumeConfigAdd(tx *sql.Tx, volumeID int64, volumeConfig map[string]string) error {
+	str := "INSERT INTO storage_volumes_config (storage_volume_id, key, value) VALUES(?, ?, ?)"
 	stmt, err := tx.Prepare(str)
 	defer stmt.Close()
 	if err != nil {
@@ -69,7 +69,7 @@ func StorageVolumeConfigAdd(tx *sql.Tx, volumeID, nodeID int64, volumeConfig map
 			continue
 		}
 
-		_, err = stmt.Exec(volumeID, nodeID, k, v)
+		_, err = stmt.Exec(volumeID, k, v)
 		if err != nil {
 			return err
 		}
@@ -79,8 +79,8 @@ func StorageVolumeConfigAdd(tx *sql.Tx, volumeID, nodeID int64, volumeConfig map
 }
 
 // Delete storage volume config.
-func StorageVolumeConfigClear(tx *sql.Tx, volumeID, nodeID int64) error {
-	_, err := tx.Exec("DELETE FROM storage_volumes_config WHERE storage_volume_id=? AND node_id", volumeID, nodeID)
+func StorageVolumeConfigClear(tx *sql.Tx, volumeID int64) error {
+	_, err := tx.Exec("DELETE FROM storage_volumes_config WHERE storage_volume_id=?", volumeID)
 	if err != nil {
 		return err
 	}

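For illustration, a minimal standalone sketch of the shape this patch settles on: storage_volumes rows are scoped per node through a node_id column, while storage_volumes_config rows reference the volume alone. The in-memory database, the sample data and the exact UNIQUE constraints are assumptions of the sketch; only the node_id filtering mirrors the diffs above.

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Volumes carry a node_id; their config rows do not.
	_, err = db.Exec(`
CREATE TABLE storage_volumes (
    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
    name TEXT NOT NULL,
    storage_pool_id INTEGER NOT NULL,
    node_id INTEGER NOT NULL,
    UNIQUE (storage_pool_id, node_id, name)
);
CREATE TABLE storage_volumes_config (
    storage_volume_id INTEGER NOT NULL,
    key TEXT NOT NULL,
    value TEXT,
    UNIQUE (storage_volume_id, key)
);`)
	if err != nil {
		log.Fatal(err)
	}

	// The same volume name can now exist on two different nodes.
	_, err = db.Exec(`INSERT INTO storage_volumes (name, storage_pool_id, node_id)
VALUES ('web', 1, 1), ('web', 1, 2)`)
	if err != nil {
		log.Fatal(err)
	}

	// Node-scoped lookup: queries filter on node_id, as ContainerPool
	// and ImageGetPools now do with c.nodeID.
	var volumeID int64
	err = db.QueryRow(
		"SELECT id FROM storage_volumes WHERE storage_pool_id=? AND node_id=? AND name=?",
		1, 1, "web").Scan(&volumeID)
	if err != nil {
		log.Fatal(err)
	}

	// Config rows are keyed by volume ID only, matching the new
	// StorageVolumeConfigAdd/Clear signatures.
	_, err = db.Exec(
		"INSERT INTO storage_volumes_config (storage_volume_id, key, value) VALUES (?, 'size', '10GB')",
		volumeID)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("volume", volumeID, "configured on node 1")
}
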
From 910dae45cb7fa3e93df7e7078f51ab475c1c533d Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 19 Jan 2018 13:33:33 +0000
Subject: [PATCH 198/227] Skip pending networks and pools when checking joining
 nodes

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_cluster.go        |  8 ++++----
 lxd/cluster/membership.go |  4 ++--
 lxd/db/networks.go        | 28 +++++++++++++++++++++++++---
 lxd/db/storage_pools.go   | 31 ++++++++++++++++++++++++++-----
 test/suites/clustering.sh |  6 ++++++
 5 files changed, 63 insertions(+), 14 deletions(-)

diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index eb8e8a8f6..d0c2bf670 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -43,7 +43,7 @@ func clusterGet(d *Daemon, r *http.Request) Response {
 	cluster := api.Cluster{}
 
 	// Fill the Networks attribute
-	networks, err := d.cluster.Networks()
+	networks, err := d.cluster.NetworksNotPending()
 	if err != nil {
 		return SmartError(err)
 	}
@@ -56,7 +56,7 @@ func clusterGet(d *Daemon, r *http.Request) Response {
 	}
 
 	// Fill the StoragePools attribute
-	pools, err := d.cluster.StoragePools()
+	pools, err := d.cluster.StoragePoolsNotPending()
 	if err != nil && err != db.NoSuchObjectError {
 		return SmartError(err)
 	}
@@ -243,7 +243,7 @@ func clusterNodesPostAccept(d *Daemon, req api.ClusterPost) Response {
 }
 
 func clusterCheckStoragePoolsMatch(cluster *db.Cluster, reqPools []api.StoragePool) error {
-	poolNames, err := cluster.StoragePools()
+	poolNames, err := cluster.StoragePoolsNotPending()
 	if err != nil && err != db.NoSuchObjectError {
 		return err
 	}
@@ -277,7 +277,7 @@ func clusterCheckStoragePoolsMatch(cluster *db.Cluster, reqPools []api.StoragePo
 }
 
 func clusterCheckNetworksMatch(cluster *db.Cluster, reqNetworks []api.Network) error {
-	networkNames, err := cluster.Networks()
+	networkNames, err := cluster.NetworksNotPending()
 	if err != nil && err != db.NoSuchObjectError {
 		return err
 	}
diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index c42738ab1..c2e946313 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -320,7 +320,7 @@ func Join(state *state.State, gateway *Gateway, cert *shared.CertInfo, name stri
 		tx.NodeID(node.ID)
 
 		// Storage pools.
-		ids, err := tx.StoragePoolIDs()
+		ids, err := tx.StoragePoolIDsNotPending()
 		if err != nil {
 			return errors.Wrap(err, "failed to get cluster storage pool IDs")
 		}
@@ -347,7 +347,7 @@ func Join(state *state.State, gateway *Gateway, cert *shared.CertInfo, name stri
 		}
 
 		// Networks.
-		ids, err = tx.NetworkIDs()
+		ids, err = tx.NetworkIDsNotPending()
 		if err != nil {
 			return errors.Wrap(err, "failed to get cluster network IDs")
 		}
diff --git a/lxd/db/networks.go b/lxd/db/networks.go
index 16177da78..794f1f8f0 100644
--- a/lxd/db/networks.go
+++ b/lxd/db/networks.go
@@ -31,8 +31,10 @@ func (c *ClusterTx) NetworkConfigs() (map[string]map[string]string, error) {
 	return networks, nil
 }
 
-// NetworkIDs returns a map associating each network name to its ID.
-func (c *ClusterTx) NetworkIDs() (map[string]int64, error) {
+// NetworkIDsNotPending returns a map associating each network name to its ID.
+//
+// Pending networks are skipped.
+func (c *ClusterTx) NetworkIDsNotPending() (map[string]int64, error) {
 	networks := []struct {
 		id   int64
 		name string
@@ -45,7 +47,8 @@ func (c *ClusterTx) NetworkIDs() (map[string]int64, error) {
 		return []interface{}{&networks[i].id, &networks[i].name}
 
 	}
-	err := query.SelectObjects(c.tx, dest, "SELECT id, name FROM networks")
+	stmt := "SELECT id, name FROM networks WHERE NOT state=?"
+	err := query.SelectObjects(c.tx, dest, stmt, networkPending)
 	if err != nil {
 		return nil, err
 	}
@@ -239,8 +242,27 @@ func (c *ClusterTx) networkState(name string, state int) error {
 }
 
 func (c *Cluster) Networks() ([]string, error) {
+	return c.networks("")
+}
+
+// NetworksNotPending returns the names of all networks that are not
+// pending.
+func (c *Cluster) NetworksNotPending() ([]string, error) {
+	return c.networks("NOT state=?", networkPending)
+}
+
+// Get all networks matching the given WHERE filter (if given).
+func (c *Cluster) networks(where string, args ...interface{}) ([]string, error) {
 	q := "SELECT name FROM networks"
 	inargs := []interface{}{}
+
+	if where != "" {
+		q += fmt.Sprintf(" WHERE %s", where)
+		for _, arg := range args {
+			inargs = append(inargs, arg)
+		}
+	}
+
 	var name string
 	outfmt := []interface{}{name}
 	result, err := queryScan(c.db, q, inargs, outfmt)
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index a9c7d2052..be460f2a0 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -57,8 +57,10 @@ func (c *ClusterTx) StoragePoolID(name string) (int64, error) {
 	}
 }
 
-// StoragePoolIDs returns a map associating each storage pool name to its ID.
-func (c *ClusterTx) StoragePoolIDs() (map[string]int64, error) {
+// StoragePoolIDsNotPending returns a map associating each storage pool name to its ID.
+//
+// Pending storage pools are skipped.
+func (c *ClusterTx) StoragePoolIDsNotPending() (map[string]int64, error) {
 	pools := []struct {
 		id   int64
 		name string
@@ -71,7 +73,8 @@ func (c *ClusterTx) StoragePoolIDs() (map[string]int64, error) {
 		return []interface{}{&pools[i].id, &pools[i].name}
 
 	}
-	err := query.SelectObjects(c.tx, dest, "SELECT id, name FROM storage_pools")
+	stmt := "SELECT id, name FROM storage_pools WHERE NOT state=?"
+	err := query.SelectObjects(c.tx, dest, stmt, storagePoolPending)
 	if err != nil {
 		return nil, err
 	}
@@ -261,12 +264,30 @@ WHERE storage_pools.id = ? AND storage_pools.state = ?
 
 // Get all storage pools.
 func (c *Cluster) StoragePools() ([]string, error) {
+	return c.storagePools("")
+}
+
+// StoragePoolsNotPending returns the names of all storage pools that are not
+// pending.
+func (c *Cluster) StoragePoolsNotPending() ([]string, error) {
+	return c.storagePools("NOT state=?", storagePoolPending)
+}
+
+// Get all storage pools matching the given WHERE filter (if given).
+func (c *Cluster) storagePools(where string, args ...interface{}) ([]string, error) {
 	var name string
-	query := "SELECT name FROM storage_pools"
+	stmt := "SELECT name FROM storage_pools"
 	inargs := []interface{}{}
 	outargs := []interface{}{name}
 
-	result, err := queryScan(c.db, query, inargs, outargs)
+	if where != "" {
+		stmt += fmt.Sprintf(" WHERE %s", where)
+		for _, arg := range args {
+			inargs = append(inargs, arg)
+		}
+	}
+
+	result, err := queryScan(c.db, stmt, inargs, outargs)
 	if err != nil {
 		return []string{}, err
 	}
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index fbde7fb24..47e821014 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -28,6 +28,12 @@ test_clustering_membership() {
   ip netns exec "${ns1}" ip link show "${bridge}" > /dev/null
   ip netns exec "${ns2}" ip link show "${bridge}" > /dev/null
 
+  # Create a pending network and pool, to show that they are not
+  # considered when checking if the joining node has all the required
+  # networks and pools.
+  LXD_DIR="${LXD_TWO_DIR}" lxc storage create pool1 dir --target node1
+  LXD_DIR="${LXD_ONE_DIR}" lxc network create net1 --target node2
+
   # Spawn a third node, using the non-leader node2 as join target.
   setup_clustering_netns 3
   LXD_THREE_DIR=$(mktemp -d -p "${TEST_DIR}" XXX)

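The networks() and storagePools() helpers added above share a small pattern: a base SELECT plus an optional WHERE fragment whose placeholder arguments are passed straight through. A generic sketch of that pattern (the selectNames helper is hypothetical, and plain database/sql stands in for LXD's internal queryScan):

package db

import (
	"database/sql"
	"fmt"
)

// selectNames returns the name column of the given table, optionally
// restricted by a WHERE fragment such as "NOT state=?". The table and
// fragment are trusted compile-time strings; only values go through
// placeholders.
func selectNames(db *sql.DB, table, where string, args ...interface{}) ([]string, error) {
	stmt := fmt.Sprintf("SELECT name FROM %s", table)
	if where != "" {
		stmt += fmt.Sprintf(" WHERE %s", where)
	}

	rows, err := db.Query(stmt, args...)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	names := []string{}
	for rows.Next() {
		var name string
		if err := rows.Scan(&name); err != nil {
			return nil, err
		}
		names = append(names, name)
	}
	return names, rows.Err()
}

With that in place, NetworksNotPending amounts to selectNames(db, "networks", "NOT state=?", networkPending), while the unfiltered Networks passes an empty fragment.
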
From 2d5c54eaaecccea95044dfdf1165f30449f2f6d9 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 19 Jan 2018 13:51:29 +0000
Subject: [PATCH 199/227] Skip pending pools upon daemon startup storage
 initialization

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/storage.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lxd/storage.go b/lxd/storage.go
index aff9fcf44..e6a4aff50 100644
--- a/lxd/storage.go
+++ b/lxd/storage.go
@@ -804,7 +804,7 @@ func StorageProgressWriter(op *operation, key string, description string) func(i
 }
 
 func SetupStorageDriver(s *state.State, forceCheck bool) error {
-	pools, err := s.Cluster.StoragePools()
+	pools, err := s.Cluster.StoragePoolsNotPending()
 	if err != nil {
 		if err == db.NoSuchObjectError {
 			logger.Debugf("No existing storage pools detected.")

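The motivation here: a pool created with --target only exists as a database row until every node has run its create step, so initializing its storage driver at startup would fail on nodes that have no backing storage yet. A toy sketch of the state filter (the constant values are assumptions; the real filter runs in SQL against storage_pools.state):

package main

import "fmt"

// Assumed lifecycle states; the real values live in lxd/db.
const (
	storagePoolPending = iota // defined via --target on some nodes only
	storagePoolCreated        // created on every node
)

type pool struct {
	name  string
	state int
}

// notPending mirrors the "WHERE NOT state=?" filter in memory.
func notPending(pools []pool) []string {
	names := []string{}
	for _, p := range pools {
		if p.state != storagePoolPending {
			names = append(names, p.name)
		}
	}
	return names
}

func main() {
	pools := []pool{{"local", storagePoolCreated}, {"pool1", storagePoolPending}}
	// SetupStorageDriver would only touch "local" here; "pool1" has no
	// storage configured on this node yet.
	fmt.Println(notPending(pools)) // [local]
}
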
From c3a9de5e997aa97bbf0c1b2a8afc368abd4d17bc Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 19 Jan 2018 15:37:45 +0000
Subject: [PATCH 200/227] Rename StoragePoolVolumesGet to
 StoragePoolNodeVolumesGet

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/migration_test.go   | 2 +-
 lxd/db/storage_pools.go    | 5 +++--
 lxd/patches.go             | 6 +++---
 lxd/storage_pools_utils.go | 2 +-
 lxd/storage_volumes.go     | 2 +-
 5 files changed, 9 insertions(+), 8 deletions(-)

diff --git a/lxd/db/migration_test.go b/lxd/db/migration_test.go
index 4f174e06b..91e269d83 100644
--- a/lxd/db/migration_test.go
+++ b/lxd/db/migration_test.go
@@ -88,7 +88,7 @@ func TestImportPreClusteringData(t *testing.T) {
 	assert.Equal(t, "mypool", pool.Config["zfs.pool_name"])
 	assert.Equal(t, "CREATED", pool.State)
 	assert.Equal(t, []string{"none"}, pool.Nodes)
-	volumes, err := cluster.StoragePoolVolumesGet(id, []int{1})
+	volumes, err := cluster.StoragePoolNodeVolumesGet(id, []int{1})
 	require.NoError(t, err)
 	assert.Len(t, volumes, 1)
 	assert.Equal(t, "/foo/bar", volumes[0].Config["source"])
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index be460f2a0..0db3f169a 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -591,8 +591,9 @@ func (c *Cluster) StoragePoolVolumesGetNames(poolID int64) (int, error) {
 	return len(result), nil
 }
 
-// Get all storage volumes attached to a given storage pool.
-func (c *Cluster) StoragePoolVolumesGet(poolID int64, volumeTypes []int) ([]*api.StorageVolume, error) {
+// Get all storage volumes attached to a given storage pool on the current
+// node.
+func (c *Cluster) StoragePoolNodeVolumesGet(poolID int64, volumeTypes []int) ([]*api.StorageVolume, error) {
 	// Get all storage volumes of all types attached to a given storage
 	// pool.
 	result := []*api.StorageVolume{}
diff --git a/lxd/patches.go b/lxd/patches.go
index 43c84f6d1..d6f900048 100644
--- a/lxd/patches.go
+++ b/lxd/patches.go
@@ -2050,7 +2050,7 @@ func patchStorageApiUpdateStorageConfigs(name string, d *Daemon) error {
 		}
 
 		// Get all storage volumes on the storage pool.
-		volumes, err := d.cluster.StoragePoolVolumesGet(poolID, supportedVolumeTypes)
+		volumes, err := d.cluster.StoragePoolNodeVolumesGet(poolID, supportedVolumeTypes)
 		if err != nil {
 			if err == db.NoSuchObjectError {
 				continue
@@ -2206,7 +2206,7 @@ func patchStorageApiDetectLVSize(name string, d *Daemon) error {
 		}
 
 		// Get all storage volumes on the storage pool.
-		volumes, err := d.cluster.StoragePoolVolumesGet(poolID, supportedVolumeTypes)
+		volumes, err := d.cluster.StoragePoolNodeVolumesGet(poolID, supportedVolumeTypes)
 		if err != nil {
 			if err == db.NoSuchObjectError {
 				continue
@@ -2355,7 +2355,7 @@ func patchStorageZFSVolumeSize(name string, d *Daemon) error {
 		}
 
 		// Get all storage volumes on the storage pool.
-		volumes, err := d.cluster.StoragePoolVolumesGet(poolID, supportedVolumeTypes)
+		volumes, err := d.cluster.StoragePoolNodeVolumesGet(poolID, supportedVolumeTypes)
 		if err != nil {
 			if err == db.NoSuchObjectError {
 				continue
diff --git a/lxd/storage_pools_utils.go b/lxd/storage_pools_utils.go
index 4f483b970..b8dbf953a 100644
--- a/lxd/storage_pools_utils.go
+++ b/lxd/storage_pools_utils.go
@@ -82,7 +82,7 @@ func storagePoolUpdate(state *state.State, name, newDescription string, newConfi
 // /1.0/profiles/default
 func storagePoolUsedByGet(state *state.State, poolID int64, poolName string) ([]string, error) {
 	// Retrieve all non-custom volumes that exist on this storage pool.
-	volumes, err := state.Cluster.StoragePoolVolumesGet(poolID, []int{storagePoolVolumeTypeContainer, storagePoolVolumeTypeImage, storagePoolVolumeTypeCustom})
+	volumes, err := state.Cluster.StoragePoolNodeVolumesGet(poolID, []int{storagePoolVolumeTypeContainer, storagePoolVolumeTypeImage, storagePoolVolumeTypeCustom})
 	if err != nil && err != db.NoSuchObjectError {
 		return []string{}, err
 	}
diff --git a/lxd/storage_volumes.go b/lxd/storage_volumes.go
index 6e227a5fb..2d8eadf14 100644
--- a/lxd/storage_volumes.go
+++ b/lxd/storage_volumes.go
@@ -35,7 +35,7 @@ func storagePoolVolumesGet(d *Daemon, r *http.Request) Response {
 
 	// Get all volumes currently attached to the storage pool by ID of the
 	// pool.
-	volumes, err := d.cluster.StoragePoolVolumesGet(poolID, supportedVolumeTypes)
+	volumes, err := d.cluster.StoragePoolNodeVolumesGet(poolID, supportedVolumeTypes)
 	if err != nil && err != db.NoSuchObjectError {
 		return SmartError(err)
 	}

From 6199b7484d3d9dad533de61c70f6a9643a6ef62a Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 19 Jan 2018 15:40:24 +0000
Subject: [PATCH 201/227] Rename StoragePoolVolumesGetType to
 StoragePoolNodeVolumesGetType

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/storage_pools.go | 6 +++---
 lxd/storage_volumes.go  | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index 0db3f169a..ff4992bad 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -598,7 +598,7 @@ func (c *Cluster) StoragePoolNodeVolumesGet(poolID int64, volumeTypes []int) ([]
 	// pool.
 	result := []*api.StorageVolume{}
 	for _, volumeType := range volumeTypes {
-		volumeNames, err := c.StoragePoolVolumesGetType(volumeType, poolID)
+		volumeNames, err := c.StoragePoolNodeVolumesGetType(volumeType, poolID)
 		if err != nil && err != sql.ErrNoRows {
 			return nil, errors.Wrap(err, "failed to fetch volume types")
 		}
@@ -619,8 +619,8 @@ func (c *Cluster) StoragePoolNodeVolumesGet(poolID int64, volumeTypes []int) ([]
 }
 
 // Get all storage volumes attached to a given storage pool of a given volume
-// type.
-func (c *Cluster) StoragePoolVolumesGetType(volumeType int, poolID int64) ([]string, error) {
+// type, on the current node.
+func (c *Cluster) StoragePoolNodeVolumesGetType(volumeType int, poolID int64) ([]string, error) {
 	var poolName string
 	query := "SELECT name FROM storage_volumes WHERE storage_pool_id=? AND node_id=? AND type=?"
 	inargs := []interface{}{poolID, c.nodeID, volumeType}
diff --git a/lxd/storage_volumes.go b/lxd/storage_volumes.go
index 2d8eadf14..d2029cb65 100644
--- a/lxd/storage_volumes.go
+++ b/lxd/storage_volumes.go
@@ -102,7 +102,7 @@ func storagePoolVolumesTypeGet(d *Daemon, r *http.Request) Response {
 
 	// Get the names of all storage volumes of a given volume type currently
 	// attached to the storage pool.
-	volumes, err := d.cluster.StoragePoolVolumesGetType(volumeType, poolID)
+	volumes, err := d.cluster.StoragePoolNodeVolumesGetType(volumeType, poolID)
 	if err != nil {
 		return SmartError(err)
 	}

From 5d2ac76756ec0dd18ab4e3e3702c68b0673e46c6 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 19 Jan 2018 15:42:26 +0000
Subject: [PATCH 202/227] Rename StoragePoolVolumeGetType to
 StoragePoolNodeVolumeGetType

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_internal.go      |  4 ++--
 lxd/container_lxc.go     |  2 +-
 lxd/db/storage_pools.go  | 10 +++++-----
 lxd/storage.go           |  2 +-
 lxd/storage_ceph.go      |  2 +-
 lxd/storage_lvm_utils.go |  2 +-
 lxd/storage_volumes.go   |  8 ++++----
 7 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/lxd/api_internal.go b/lxd/api_internal.go
index 3f644db96..7ebed0612 100644
--- a/lxd/api_internal.go
+++ b/lxd/api_internal.go
@@ -574,7 +574,7 @@ func internalImport(d *Daemon, r *http.Request) Response {
 	}
 
 	// Check if a storage volume entry for the container already exists.
-	_, volume, ctVolErr := d.cluster.StoragePoolVolumeGetType(
+	_, volume, ctVolErr := d.cluster.StoragePoolNodeVolumeGetType(
 		req.Name, storagePoolVolumeTypeContainer, poolID)
 	if ctVolErr != nil {
 		if ctVolErr != db.NoSuchObjectError {
@@ -657,7 +657,7 @@ func internalImport(d *Daemon, r *http.Request) Response {
 		}
 
 		// Check if a storage volume entry for the snapshot already exists.
-		_, _, csVolErr := d.cluster.StoragePoolVolumeGetType(snap.Name,
+		_, _, csVolErr := d.cluster.StoragePoolNodeVolumeGetType(snap.Name,
 			storagePoolVolumeTypeContainer, poolID)
 		if csVolErr != nil {
 			if csVolErr != db.NoSuchObjectError {
diff --git a/lxd/container_lxc.go b/lxd/container_lxc.go
index 6c9a65347..2c29e558a 100644
--- a/lxd/container_lxc.go
+++ b/lxd/container_lxc.go
@@ -3388,7 +3388,7 @@ func writeBackupFile(c container) error {
 		return err
 	}
 
-	_, volume, err := s.Cluster.StoragePoolVolumeGetType(c.Name(), storagePoolVolumeTypeContainer, poolID)
+	_, volume, err := s.Cluster.StoragePoolNodeVolumeGetType(c.Name(), storagePoolVolumeTypeContainer, poolID)
 	if err != nil {
 		return err
 	}
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index ff4992bad..0aaadb91d 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -603,7 +603,7 @@ func (c *Cluster) StoragePoolNodeVolumesGet(poolID int64, volumeTypes []int) ([]
 			return nil, errors.Wrap(err, "failed to fetch volume types")
 		}
 		for _, volumeName := range volumeNames {
-			_, volume, err := c.StoragePoolVolumeGetType(volumeName, volumeType, poolID)
+			_, volume, err := c.StoragePoolNodeVolumeGetType(volumeName, volumeType, poolID)
 			if err != nil {
 				return nil, errors.Wrap(err, "failed to fetch volume type")
 			}
@@ -640,7 +640,7 @@ func (c *Cluster) StoragePoolNodeVolumesGetType(volumeType int, poolID int64) ([
 }
 
 // Get a single storage volume attached to a given storage pool of a given type.
-func (c *Cluster) StoragePoolVolumeGetType(volumeName string, volumeType int, poolID int64) (int64, *api.StorageVolume, error) {
+func (c *Cluster) StoragePoolNodeVolumeGetType(volumeName string, volumeType int, poolID int64) (int64, *api.StorageVolume, error) {
 	volumeID, err := c.StoragePoolVolumeGetTypeID(volumeName, volumeType, poolID)
 	if err != nil {
 		return -1, nil, err
@@ -673,7 +673,7 @@ func (c *Cluster) StoragePoolVolumeGetType(volumeName string, volumeType int, po
 
 // Update storage volume attached to a given storage pool.
 func (c *Cluster) StoragePoolVolumeUpdate(volumeName string, volumeType int, poolID int64, volumeDescription string, volumeConfig map[string]string) error {
-	volumeID, _, err := c.StoragePoolVolumeGetType(volumeName, volumeType, poolID)
+	volumeID, _, err := c.StoragePoolNodeVolumeGetType(volumeName, volumeType, poolID)
 	if err != nil {
 		return err
 	}
@@ -706,7 +706,7 @@ func (c *Cluster) StoragePoolVolumeUpdate(volumeName string, volumeType int, poo
 
 // Delete storage volume attached to a given storage pool.
 func (c *Cluster) StoragePoolVolumeDelete(volumeName string, volumeType int, poolID int64) error {
-	volumeID, _, err := c.StoragePoolVolumeGetType(volumeName, volumeType, poolID)
+	volumeID, _, err := c.StoragePoolNodeVolumeGetType(volumeName, volumeType, poolID)
 	if err != nil {
 		return err
 	}
@@ -721,7 +721,7 @@ func (c *Cluster) StoragePoolVolumeDelete(volumeName string, volumeType int, poo
 
 // Rename storage volume attached to a given storage pool.
 func (c *Cluster) StoragePoolVolumeRename(oldVolumeName string, newVolumeName string, volumeType int, poolID int64) error {
-	volumeID, _, err := c.StoragePoolVolumeGetType(oldVolumeName, volumeType, poolID)
+	volumeID, _, err := c.StoragePoolNodeVolumeGetType(oldVolumeName, volumeType, poolID)
 	if err != nil {
 		return err
 	}
diff --git a/lxd/storage.go b/lxd/storage.go
index e6a4aff50..b08049ff6 100644
--- a/lxd/storage.go
+++ b/lxd/storage.go
@@ -298,7 +298,7 @@ func storageInit(s *state.State, poolName string, volumeName string, volumeType
 	// Load the storage volume.
 	volume := &api.StorageVolume{}
 	if volumeName != "" && volumeType >= 0 {
-		_, volume, err = s.Cluster.StoragePoolVolumeGetType(volumeName, volumeType, poolID)
+		_, volume, err = s.Cluster.StoragePoolNodeVolumeGetType(volumeName, volumeType, poolID)
 		if err != nil {
 			return nil, err
 		}
diff --git a/lxd/storage_ceph.go b/lxd/storage_ceph.go
index d5303a936..c22491c06 100644
--- a/lxd/storage_ceph.go
+++ b/lxd/storage_ceph.go
@@ -972,7 +972,7 @@ func (s *storageCeph) ContainerCreateFromImage(container container, fingerprint
 			fingerprint, storagePoolVolumeTypeNameImage, s.UserName)
 
 		if ok {
-			_, volume, err := s.s.Cluster.StoragePoolVolumeGetType(fingerprint, db.StoragePoolVolumeTypeImage, s.poolID)
+			_, volume, err := s.s.Cluster.StoragePoolNodeVolumeGetType(fingerprint, db.StoragePoolVolumeTypeImage, s.poolID)
 			if err != nil {
 				return err
 			}
diff --git a/lxd/storage_lvm_utils.go b/lxd/storage_lvm_utils.go
index 08681d072..1bd244042 100644
--- a/lxd/storage_lvm_utils.go
+++ b/lxd/storage_lvm_utils.go
@@ -497,7 +497,7 @@ func (s *storageLvm) containerCreateFromImageThinLv(c container, fp string) erro
 		var imgerr error
 		ok, _ := storageLVExists(imageLvmDevPath)
 		if ok {
-			_, volume, err := s.s.Cluster.StoragePoolVolumeGetType(fp, db.StoragePoolVolumeTypeImage, s.poolID)
+			_, volume, err := s.s.Cluster.StoragePoolNodeVolumeGetType(fp, db.StoragePoolVolumeTypeImage, s.poolID)
 			if err != nil {
 				return err
 			}
diff --git a/lxd/storage_volumes.go b/lxd/storage_volumes.go
index d2029cb65..9f6b143ed 100644
--- a/lxd/storage_volumes.go
+++ b/lxd/storage_volumes.go
@@ -117,7 +117,7 @@ func storagePoolVolumesTypeGet(d *Daemon, r *http.Request) Response {
 			}
 			resultString = append(resultString, fmt.Sprintf("/%s/storage-pools/%s/volumes/%s/%s", version.APIVersion, poolName, apiEndpoint, volume))
 		} else {
-			_, vol, err := d.cluster.StoragePoolVolumeGetType(volume, volumeType, poolID)
+			_, vol, err := d.cluster.StoragePoolNodeVolumeGetType(volume, volumeType, poolID)
 			if err != nil {
 				continue
 			}
@@ -287,7 +287,7 @@ func storagePoolVolumeTypeGet(d *Daemon, r *http.Request) Response {
 	}
 
 	// Get the storage volume.
-	_, volume, err := d.cluster.StoragePoolVolumeGetType(volumeName, volumeType, poolID)
+	_, volume, err := d.cluster.StoragePoolNodeVolumeGetType(volumeName, volumeType, poolID)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -331,7 +331,7 @@ func storagePoolVolumeTypePut(d *Daemon, r *http.Request) Response {
 	}
 
 	// Get the existing storage volume.
-	_, volume, err := d.cluster.StoragePoolVolumeGetType(volumeName, volumeType, poolID)
+	_, volume, err := d.cluster.StoragePoolNodeVolumeGetType(volumeName, volumeType, poolID)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -393,7 +393,7 @@ func storagePoolVolumeTypePatch(d *Daemon, r *http.Request) Response {
 	}
 
 	// Get the existing storage volume.
-	_, volume, err := d.cluster.StoragePoolVolumeGetType(volumeName, volumeType, poolID)
+	_, volume, err := d.cluster.StoragePoolNodeVolumeGetType(volumeName, volumeType, poolID)
 	if err != nil {
 		return SmartError(err)
 	}

From 3c91599716771c79b145fe5f66cc85f3c04f387c Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 19 Jan 2018 15:47:09 +0000
Subject: [PATCH 203/227] Rename StoragePoolVolumeGetTypeID to
 StoragePoolNodeVolumeGetTypeID

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/storage_pools.go      |  7 ++++---
 lxd/patches.go               | 24 ++++++++++++------------
 lxd/storage_volumes.go       |  2 +-
 lxd/storage_volumes_utils.go |  2 +-
 4 files changed, 18 insertions(+), 17 deletions(-)

diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index 0aaadb91d..4739c2691 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -639,9 +639,10 @@ func (c *Cluster) StoragePoolNodeVolumesGetType(volumeType int, poolID int64) ([
 	return response, nil
 }
 
-// Get a single storage volume attached to a given storage pool of a given type.
+// Get a single storage volume attached to a given storage pool of a given
+// type, on the current node.
 func (c *Cluster) StoragePoolNodeVolumeGetType(volumeName string, volumeType int, poolID int64) (int64, *api.StorageVolume, error) {
-	volumeID, err := c.StoragePoolVolumeGetTypeID(volumeName, volumeType, poolID)
+	volumeID, err := c.StoragePoolNodeVolumeGetTypeID(volumeName, volumeType, poolID)
 	if err != nil {
 		return -1, nil, err
 	}
@@ -776,7 +777,7 @@ func (c *Cluster) StoragePoolVolumeCreate(volumeName, volumeDescription string,
 
 // Get ID of a storage volume on a given storage pool of a given storage volume
 // type.
-func (c *Cluster) StoragePoolVolumeGetTypeID(volumeName string, volumeType int, poolID int64) (int64, error) {
+func (c *Cluster) StoragePoolNodeVolumeGetTypeID(volumeName string, volumeType int, poolID int64) (int64, error) {
 	volumeID := int64(-1)
 	query := `SELECT storage_volumes.id
 FROM storage_volumes
diff --git a/lxd/patches.go b/lxd/patches.go
index d6f900048..c6885d4ef 100644
--- a/lxd/patches.go
+++ b/lxd/patches.go
@@ -392,7 +392,7 @@ func upgradeFromStorageTypeBtrfs(name string, d *Daemon, defaultPoolName string,
 			return err
 		}
 
-		_, err = d.cluster.StoragePoolVolumeGetTypeID(ct, storagePoolVolumeTypeContainer, poolID)
+		_, err = d.cluster.StoragePoolNodeVolumeGetTypeID(ct, storagePoolVolumeTypeContainer, poolID)
 		if err == nil {
 			logger.Warnf("Storage volumes database already contains an entry for the container.")
 			err := d.cluster.StoragePoolVolumeUpdate(ct, storagePoolVolumeTypeContainer, poolID, "", containerPoolVolumeConfig)
@@ -480,7 +480,7 @@ func upgradeFromStorageTypeBtrfs(name string, d *Daemon, defaultPoolName string,
 				return err
 			}
 
-			_, err = d.cluster.StoragePoolVolumeGetTypeID(cs, storagePoolVolumeTypeContainer, poolID)
+			_, err = d.cluster.StoragePoolNodeVolumeGetTypeID(cs, storagePoolVolumeTypeContainer, poolID)
 			if err == nil {
 				logger.Warnf("Storage volumes database already contains an entry for the snapshot.")
 				err := d.cluster.StoragePoolVolumeUpdate(cs, storagePoolVolumeTypeContainer, poolID, "", snapshotPoolVolumeConfig)
@@ -561,7 +561,7 @@ func upgradeFromStorageTypeBtrfs(name string, d *Daemon, defaultPoolName string,
 			return err
 		}
 
-		_, err = d.cluster.StoragePoolVolumeGetTypeID(img, storagePoolVolumeTypeImage, poolID)
+		_, err = d.cluster.StoragePoolNodeVolumeGetTypeID(img, storagePoolVolumeTypeImage, poolID)
 		if err == nil {
 			logger.Warnf("Storage volumes database already contains an entry for the image.")
 			err := d.cluster.StoragePoolVolumeUpdate(img, storagePoolVolumeTypeImage, poolID, "", imagePoolVolumeConfig)
@@ -679,7 +679,7 @@ func upgradeFromStorageTypeDir(name string, d *Daemon, defaultPoolName string, d
 			return err
 		}
 
-		_, err = d.cluster.StoragePoolVolumeGetTypeID(ct, storagePoolVolumeTypeContainer, poolID)
+		_, err = d.cluster.StoragePoolNodeVolumeGetTypeID(ct, storagePoolVolumeTypeContainer, poolID)
 		if err == nil {
 			logger.Warnf("Storage volumes database already contains an entry for the container.")
 			err := d.cluster.StoragePoolVolumeUpdate(ct, storagePoolVolumeTypeContainer, poolID, "", containerPoolVolumeConfig)
@@ -796,7 +796,7 @@ func upgradeFromStorageTypeDir(name string, d *Daemon, defaultPoolName string, d
 			return err
 		}
 
-		_, err = d.cluster.StoragePoolVolumeGetTypeID(cs, storagePoolVolumeTypeContainer, poolID)
+		_, err = d.cluster.StoragePoolNodeVolumeGetTypeID(cs, storagePoolVolumeTypeContainer, poolID)
 		if err == nil {
 			logger.Warnf("Storage volumes database already contains an entry for the snapshot.")
 			err := d.cluster.StoragePoolVolumeUpdate(cs, storagePoolVolumeTypeContainer, poolID, "", snapshotPoolVolumeConfig)
@@ -826,7 +826,7 @@ func upgradeFromStorageTypeDir(name string, d *Daemon, defaultPoolName string, d
 			return err
 		}
 
-		_, err = d.cluster.StoragePoolVolumeGetTypeID(img, storagePoolVolumeTypeImage, poolID)
+		_, err = d.cluster.StoragePoolNodeVolumeGetTypeID(img, storagePoolVolumeTypeImage, poolID)
 		if err == nil {
 			logger.Warnf("Storage volumes database already contains an entry for the image.")
 			err := d.cluster.StoragePoolVolumeUpdate(img, storagePoolVolumeTypeImage, poolID, "", imagePoolVolumeConfig)
@@ -988,7 +988,7 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
 			return err
 		}
 
-		_, err = d.cluster.StoragePoolVolumeGetTypeID(ct, storagePoolVolumeTypeContainer, poolID)
+		_, err = d.cluster.StoragePoolNodeVolumeGetTypeID(ct, storagePoolVolumeTypeContainer, poolID)
 		if err == nil {
 			logger.Warnf("Storage volumes database already contains an entry for the container.")
 			err := d.cluster.StoragePoolVolumeUpdate(ct, storagePoolVolumeTypeContainer, poolID, "", containerPoolVolumeConfig)
@@ -1143,7 +1143,7 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
 				return err
 			}
 
-			_, err = d.cluster.StoragePoolVolumeGetTypeID(cs, storagePoolVolumeTypeContainer, poolID)
+			_, err = d.cluster.StoragePoolNodeVolumeGetTypeID(cs, storagePoolVolumeTypeContainer, poolID)
 			if err == nil {
 				logger.Warnf("Storage volumes database already contains an entry for the snapshot.")
 				err := d.cluster.StoragePoolVolumeUpdate(cs, storagePoolVolumeTypeContainer, poolID, "", snapshotPoolVolumeConfig)
@@ -1314,7 +1314,7 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
 			return err
 		}
 
-		_, err = d.cluster.StoragePoolVolumeGetTypeID(img, storagePoolVolumeTypeImage, poolID)
+		_, err = d.cluster.StoragePoolNodeVolumeGetTypeID(img, storagePoolVolumeTypeImage, poolID)
 		if err == nil {
 			logger.Warnf("Storage volumes database already contains an entry for the image.")
 			err := d.cluster.StoragePoolVolumeUpdate(img, storagePoolVolumeTypeImage, poolID, "", imagePoolVolumeConfig)
@@ -1505,7 +1505,7 @@ func upgradeFromStorageTypeZfs(name string, d *Daemon, defaultPoolName string, d
 			return err
 		}
 
-		_, err = d.cluster.StoragePoolVolumeGetTypeID(ct, storagePoolVolumeTypeContainer, poolID)
+		_, err = d.cluster.StoragePoolNodeVolumeGetTypeID(ct, storagePoolVolumeTypeContainer, poolID)
 		if err == nil {
 			logger.Warnf("Storage volumes database already contains an entry for the container.")
 			err := d.cluster.StoragePoolVolumeUpdate(ct, storagePoolVolumeTypeContainer, poolID, "", containerPoolVolumeConfig)
@@ -1591,7 +1591,7 @@ func upgradeFromStorageTypeZfs(name string, d *Daemon, defaultPoolName string, d
 				return err
 			}
 
-			_, err = d.cluster.StoragePoolVolumeGetTypeID(cs, storagePoolVolumeTypeContainer, poolID)
+			_, err = d.cluster.StoragePoolNodeVolumeGetTypeID(cs, storagePoolVolumeTypeContainer, poolID)
 			if err == nil {
 				logger.Warnf("Storage volumes database already contains an entry for the snapshot.")
 				err := d.cluster.StoragePoolVolumeUpdate(cs, storagePoolVolumeTypeContainer, poolID, "", snapshotPoolVolumeConfig)
@@ -1647,7 +1647,7 @@ func upgradeFromStorageTypeZfs(name string, d *Daemon, defaultPoolName string, d
 			return err
 		}
 
-		_, err = d.cluster.StoragePoolVolumeGetTypeID(img, storagePoolVolumeTypeImage, poolID)
+		_, err = d.cluster.StoragePoolNodeVolumeGetTypeID(img, storagePoolVolumeTypeImage, poolID)
 		if err == nil {
 			logger.Warnf("Storage volumes database already contains an entry for the image.")
 			err := d.cluster.StoragePoolVolumeUpdate(img, storagePoolVolumeTypeImage, poolID, "", imagePoolVolumeConfig)
diff --git a/lxd/storage_volumes.go b/lxd/storage_volumes.go
index 9f6b143ed..f4edf93c4 100644
--- a/lxd/storage_volumes.go
+++ b/lxd/storage_volumes.go
@@ -237,7 +237,7 @@ func storagePoolVolumeTypePost(d *Daemon, r *http.Request) Response {
 	}
 
 	// Check that the name isn't already in use.
-	_, err = d.cluster.StoragePoolVolumeGetTypeID(req.Name,
+	_, err = d.cluster.StoragePoolNodeVolumeGetTypeID(req.Name,
 		storagePoolVolumeTypeCustom, poolID)
 	if err == nil || err != nil && err != db.NoSuchObjectError {
 		return Conflict
diff --git a/lxd/storage_volumes_utils.go b/lxd/storage_volumes_utils.go
index 2cfe05647..b90c0eb5b 100644
--- a/lxd/storage_volumes_utils.go
+++ b/lxd/storage_volumes_utils.go
@@ -309,7 +309,7 @@ func storagePoolVolumeDBCreate(s *state.State, poolName string, volumeName, volu
 
 	// Check that a storage volume of the same storage volume type does not
 	// already exist.
-	volumeID, _ := s.Cluster.StoragePoolVolumeGetTypeID(volumeName, volumeType, poolID)
+	volumeID, _ := s.Cluster.StoragePoolNodeVolumeGetTypeID(volumeName, volumeType, poolID)
 	if volumeID > 0 {
 		return fmt.Errorf("a storage volume of type %s does already exist", volumeTypeName)
 	}

From 2cdc88b953889cabd3e876ca7da64f0a2f8f5a9a Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 19 Jan 2018 15:59:59 +0000
Subject: [PATCH 204/227] Add a nodeID parameter to most volume-related db APIs

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/storage_pools.go | 50 ++++++++++++++++++++++++++++++++++++-------------
 1 file changed, 37 insertions(+), 13 deletions(-)

diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index 4739c2691..905d62b39 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -594,16 +594,22 @@ func (c *Cluster) StoragePoolVolumesGetNames(poolID int64) (int, error) {
 // Get all storage volumes attached to a given storage pool on the current
 // node.
 func (c *Cluster) StoragePoolNodeVolumesGet(poolID int64, volumeTypes []int) ([]*api.StorageVolume, error) {
+	return c.storagePoolVolumesGet(poolID, c.nodeID, volumeTypes)
+}
+
+// Returns all storage volumes attached to a given storage pool on the given
+// node.
+func (c *Cluster) storagePoolVolumesGet(poolID, nodeID int64, volumeTypes []int) ([]*api.StorageVolume, error) {
 	// Get all storage volumes of all types attached to a given storage
 	// pool.
 	result := []*api.StorageVolume{}
 	for _, volumeType := range volumeTypes {
-		volumeNames, err := c.StoragePoolNodeVolumesGetType(volumeType, poolID)
+		volumeNames, err := c.StoragePoolVolumesGetType(volumeType, poolID, nodeID)
 		if err != nil && err != sql.ErrNoRows {
 			return nil, errors.Wrap(err, "failed to fetch volume types")
 		}
 		for _, volumeName := range volumeNames {
-			_, volume, err := c.StoragePoolNodeVolumeGetType(volumeName, volumeType, poolID)
+			_, volume, err := c.StoragePoolVolumeGetType(volumeName, volumeType, poolID, nodeID)
 			if err != nil {
 				return nil, errors.Wrap(err, "failed to fetch volume type")
 			}
@@ -618,12 +624,12 @@ func (c *Cluster) StoragePoolNodeVolumesGet(poolID int64, volumeTypes []int) ([]
 	return result, nil
 }
 
-// Get all storage volumes attached to a given storage pool of a given volume
-// type, on the current node.
-func (c *Cluster) StoragePoolNodeVolumesGetType(volumeType int, poolID int64) ([]string, error) {
+// StoragePoolVolumesGetType gets all storage volumes attached to a given
+// storage pool of a given volume type, on the given node.
+func (c *Cluster) StoragePoolVolumesGetType(volumeType int, poolID, nodeID int64) ([]string, error) {
 	var poolName string
 	query := "SELECT name FROM storage_volumes WHERE storage_pool_id=? AND node_id=? AND type=?"
-	inargs := []interface{}{poolID, c.nodeID, volumeType}
+	inargs := []interface{}{poolID, nodeID, volumeType}
 	outargs := []interface{}{poolName}
 
 	result, err := queryScan(c.db, query, inargs, outargs)
@@ -639,10 +645,16 @@ func (c *Cluster) StoragePoolNodeVolumesGetType(volumeType int, poolID int64) ([
 	return response, nil
 }
 
-// Get a single storage volume attached to a given storage pool of a given
+// Get all storage volumes attached to a given storage pool of a given volume
 // type, on the current node.
-func (c *Cluster) StoragePoolNodeVolumeGetType(volumeName string, volumeType int, poolID int64) (int64, *api.StorageVolume, error) {
-	volumeID, err := c.StoragePoolNodeVolumeGetTypeID(volumeName, volumeType, poolID)
+func (c *Cluster) StoragePoolNodeVolumesGetType(volumeType int, poolID int64) ([]string, error) {
+	return c.StoragePoolVolumesGetType(volumeType, poolID, c.nodeID)
+}
+
+// StoragePoolVolumeGetType returns a single storage volume attached to a
+// given storage pool of a given type, on the node with the given ID.
+func (c *Cluster) StoragePoolVolumeGetType(volumeName string, volumeType int, poolID, nodeID int64) (int64, *api.StorageVolume, error) {
+	volumeID, err := c.StoragePoolVolumeGetTypeID(volumeName, volumeType, poolID, nodeID)
 	if err != nil {
 		return -1, nil, err
 	}
@@ -672,6 +684,12 @@ func (c *Cluster) StoragePoolNodeVolumeGetType(volumeName string, volumeType int
 	return volumeID, &storageVolume, nil
 }
 
+// Get a single storage volume attached to a given storage pool of a given
+// type, on the current node.
+func (c *Cluster) StoragePoolNodeVolumeGetType(volumeName string, volumeType int, poolID int64) (int64, *api.StorageVolume, error) {
+	return c.StoragePoolVolumeGetType(volumeName, volumeType, poolID, c.nodeID)
+}
+
 // Update storage volume attached to a given storage pool.
 func (c *Cluster) StoragePoolVolumeUpdate(volumeName string, volumeType int, poolID int64, volumeDescription string, volumeConfig map[string]string) error {
 	volumeID, _, err := c.StoragePoolNodeVolumeGetType(volumeName, volumeType, poolID)
@@ -775,9 +793,9 @@ func (c *Cluster) StoragePoolVolumeCreate(volumeName, volumeDescription string,
 	return volumeID, nil
 }
 
-// Get ID of a storage volume on a given storage pool of a given storage volume
-// type.
-func (c *Cluster) StoragePoolNodeVolumeGetTypeID(volumeName string, volumeType int, poolID int64) (int64, error) {
+// StoragePoolVolumeGetTypeID returns the ID of a storage volume on a given
+// storage pool of a given storage volume type, on the given node.
+func (c *Cluster) StoragePoolVolumeGetTypeID(volumeName string, volumeType int, poolID, nodeID int64) (int64, error) {
 	volumeID := int64(-1)
 	query := `SELECT storage_volumes.id
 FROM storage_volumes
@@ -785,7 +803,7 @@ JOIN storage_pools
 ON storage_volumes.storage_pool_id = storage_pools.id
 WHERE storage_volumes.storage_pool_id=? AND storage_volumes.node_id=?
 AND storage_volumes.name=? AND storage_volumes.type=?`
-	inargs := []interface{}{poolID, c.nodeID, volumeName, volumeType}
+	inargs := []interface{}{poolID, nodeID, volumeName, volumeType}
 	outargs := []interface{}{&volumeID}
 
 	err := dbQueryRowScan(c.db, query, inargs, outargs)
@@ -796,6 +814,12 @@ AND storage_volumes.name=? AND storage_volumes.type=?`
 	return volumeID, nil
 }
 
+// Get ID of a storage volume on a given storage pool of a given storage volume
+// type, on the current node.
+func (c *Cluster) StoragePoolNodeVolumeGetTypeID(volumeName string, volumeType int, poolID int64) (int64, error) {
+	return c.StoragePoolVolumeGetTypeID(volumeName, volumeType, poolID, c.nodeID)
+}
+
 // XXX: this was extracted from lxd/storage_volume_utils.go, we find a way to
 //      factor it independently from both the db and main packages.
 const (

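Patches 200 through 204 together establish a naming convention: the plain accessor takes an explicit nodeID, while a Node-infixed variant binds it to the connection's own node. A toy mirror of that layering (the types and fields are illustrative, not LXD's actual ones):

package main

import "fmt"

type Cluster struct {
	nodeID  int64
	volumes map[int64][]string // nodeID -> volume names
}

// VolumesGet returns the volumes on the given node, like
// StoragePoolVolumesGetType taking an explicit nodeID.
func (c *Cluster) VolumesGet(nodeID int64) []string {
	return c.volumes[nodeID]
}

// NodeVolumesGet binds the lookup to the current node, like
// StoragePoolNodeVolumesGetType passing c.nodeID through.
func (c *Cluster) NodeVolumesGet() []string {
	return c.VolumesGet(c.nodeID)
}

func main() {
	c := &Cluster{nodeID: 1, volumes: map[int64][]string{1: {"web"}, 2: {"db"}}}
	fmt.Println(c.NodeVolumesGet()) // [web]
	fmt.Println(c.VolumesGet(2))    // [db]
}
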
From ff04fd4b90b227dc4f9dd15f031d27abe658fe1b Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Sat, 20 Jan 2018 17:09:58 +0000
Subject: [PATCH 205/227] Add NODE column to lxc storage volume list, if daemon
 is clustered

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxc/storage.go                    | 15 ++++++++++++---
 lxd/db/storage_pools.go           | 31 +++++++++++++++++++++++++++++++
 lxd/db/storage_volumes.go         | 22 ++++++++++++++++++++++
 lxd/storage_volumes.go            |  2 +-
 shared/api/storage_pool_volume.go |  1 +
 test/suites/clustering.sh         |  5 +++++
 6 files changed, 72 insertions(+), 4 deletions(-)

diff --git a/lxc/storage.go b/lxc/storage.go
index 101f53df9..eb8ce9a25 100644
--- a/lxc/storage.go
+++ b/lxc/storage.go
@@ -899,18 +899,27 @@ func (c *storageCmd) doStoragePoolVolumesList(conf *config.Config, remote string
 	data := [][]string{}
 	for _, volume := range volumes {
 		usedby := strconv.Itoa(len(volume.UsedBy))
-		data = append(data, []string{volume.Type, volume.Name, volume.Description, usedby})
+		entry := []string{volume.Type, volume.Name, volume.Description, usedby}
+		if client.IsClustered() {
+			entry = append(entry, volume.Node)
+		}
+		data = append(data, entry)
 	}
 
 	table := tablewriter.NewWriter(os.Stdout)
 	table.SetAutoWrapText(false)
 	table.SetAlignment(tablewriter.ALIGN_LEFT)
 	table.SetRowLine(true)
-	table.SetHeader([]string{
+	header := []string{
 		i18n.G("TYPE"),
 		i18n.G("NAME"),
 		i18n.G("DESCRIPTION"),
-		i18n.G("USED BY")})
+		i18n.G("USED BY"),
+	}
+	if client.IsClustered() {
+		header = append(header, i18n.G("NODE"))
+	}
+	table.SetHeader(header)
 	sort.Sort(byNameAndType(data))
 	table.AppendBulk(data)
 	table.Render()
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index 905d62b39..fb39de907 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -591,6 +591,31 @@ func (c *Cluster) StoragePoolVolumesGetNames(poolID int64) (int, error) {
 	return len(result), nil
 }
 
+// StoragePoolVolumesGet returns all storage volumes attached to a given
+// storage pool on any node.
+func (c *Cluster) StoragePoolVolumesGet(poolID int64, volumeTypes []int) ([]*api.StorageVolume, error) {
+	var nodeIDs []int
+
+	err := c.Transaction(func(tx *ClusterTx) error {
+		var err error
+		nodeIDs, err = query.SelectIntegers(tx.tx, "SELECT DISTINCT node_id FROM storage_volumes WHERE storage_pool_id=?", poolID)
+		return err
+	})
+	if err != nil {
+		return nil, err
+	}
+	volumes := []*api.StorageVolume{}
+
+	for _, nodeID := range nodeIDs {
+		nodeVolumes, err := c.storagePoolVolumesGet(poolID, int64(nodeID), volumeTypes)
+		if err != nil {
+			return nil, err
+		}
+		volumes = append(volumes, nodeVolumes...)
+	}
+	return volumes, nil
+}
+
 // Get all storage volumes attached to a given storage pool on the current
 // node.
 func (c *Cluster) StoragePoolNodeVolumesGet(poolID int64, volumeTypes []int) ([]*api.StorageVolume, error) {
@@ -659,6 +684,11 @@ func (c *Cluster) StoragePoolVolumeGetType(volumeName string, volumeType int, po
 		return -1, nil, err
 	}
 
+	volumeNode, err := c.StorageVolumeNodeGet(volumeID)
+	if err != nil {
+		return -1, nil, err
+	}
+
 	volumeConfig, err := c.StorageVolumeConfigGet(volumeID)
 	if err != nil {
 		return -1, nil, err
@@ -680,6 +710,7 @@ func (c *Cluster) StoragePoolVolumeGetType(volumeName string, volumeType int, po
 	storageVolume.Name = volumeName
 	storageVolume.Description = volumeDescription
 	storageVolume.Config = volumeConfig
+	storageVolume.Node = volumeNode
 
 	return volumeID, &storageVolume, nil
 }
diff --git a/lxd/db/storage_volumes.go b/lxd/db/storage_volumes.go
index 92ad4def3..606a9e8d0 100644
--- a/lxd/db/storage_volumes.go
+++ b/lxd/db/storage_volumes.go
@@ -8,6 +8,28 @@
 	_ "github.com/mattn/go-sqlite3"
 )
 
+// StorageVolumeNodeGet returns the name of the node a storage volume is on.
+func (c *Cluster) StorageVolumeNodeGet(volumeID int64) (string, error) {
+	name := ""
+	query := `
+SELECT nodes.name FROM storage_volumes
+  JOIN nodes ON nodes.id=storage_volumes.node_id
+   WHERE storage_volumes.id=?
+`
+	inargs := []interface{}{volumeID}
+	outargs := []interface{}{&name}
+
+	err := dbQueryRowScan(c.db, query, inargs, outargs)
+	if err != nil {
+		if err == sql.ErrNoRows {
+			return "", NoSuchObjectError
+		}
+		return "", err
+	}
+
+	return name, nil
+}
+
 // Get config of a storage volume.
 func (c *Cluster) StorageVolumeConfigGet(volumeID int64) (map[string]string, error) {
 	var key, value string
diff --git a/lxd/storage_volumes.go b/lxd/storage_volumes.go
index f4edf93c4..247ea2bf5 100644
--- a/lxd/storage_volumes.go
+++ b/lxd/storage_volumes.go
@@ -35,7 +35,7 @@ func storagePoolVolumesGet(d *Daemon, r *http.Request) Response {
 
 	// Get all volumes currently attached to the storage pool by ID of the
 	// pool.
-	volumes, err := d.cluster.StoragePoolNodeVolumesGet(poolID, supportedVolumeTypes)
+	volumes, err := d.cluster.StoragePoolVolumesGet(poolID, supportedVolumeTypes)
 	if err != nil && err != db.NoSuchObjectError {
 		return SmartError(err)
 	}
diff --git a/shared/api/storage_pool_volume.go b/shared/api/storage_pool_volume.go
index 61e6b8732..b5c34a42c 100644
--- a/shared/api/storage_pool_volume.go
+++ b/shared/api/storage_pool_volume.go
@@ -25,6 +25,7 @@ type StorageVolume struct {
 	Name             string   `json:"name" yaml:"name"`
 	Type             string   `json:"type" yaml:"type"`
 	UsedBy           []string `json:"used_by" yaml:"used_by"`
+	Node             string   `json:"node" yaml:"node"`
 }
 
 // StorageVolumePut represents the modifiable fields of a LXD storage volume.
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 47e821014..1ab572561 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -289,6 +289,11 @@ test_clustering_storage() {
   LXD_DIR="${LXD_ONE_DIR}" lxc storage delete pool1
   ! LXD_DIR="${LXD_ONE_DIR}" lxc storage list | grep -q pool1
 
+  # Create a volume on node1
+  LXD_DIR="${LXD_ONE_DIR}" lxc storage volume create data web
+  LXD_DIR="${LXD_ONE_DIR}" lxc storage volume list data | grep -q node1
+  LXD_DIR="${LXD_TWO_DIR}" lxc storage volume list data | grep -q node1
+
   LXD_DIR="${LXD_TWO_DIR}" lxd shutdown
   LXD_DIR="${LXD_ONE_DIR}" lxd shutdown
   sleep 2

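Two queries do the heavy lifting in this patch: a DISTINCT scan to find which nodes own volumes in a pool, and a join to resolve a volume's node name for the new NODE column. A standalone sketch of both against an in-memory database (schema and data are illustrative):

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	_, err = db.Exec(`
CREATE TABLE nodes (id INTEGER PRIMARY KEY, name TEXT);
CREATE TABLE storage_volumes (
    id INTEGER PRIMARY KEY, name TEXT,
    storage_pool_id INTEGER, node_id INTEGER
);
INSERT INTO nodes VALUES (1, 'node1'), (2, 'node2');
INSERT INTO storage_volumes VALUES (1, 'web', 1, 1), (2, 'cache', 1, 2);`)
	if err != nil {
		log.Fatal(err)
	}

	// Which nodes have volumes on pool 1? Mirrors the DISTINCT node_id
	// query in StoragePoolVolumesGet.
	rows, err := db.Query("SELECT DISTINCT node_id FROM storage_volumes WHERE storage_pool_id=?", 1)
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()
	for rows.Next() {
		var nodeID int64
		if err := rows.Scan(&nodeID); err != nil {
			log.Fatal(err)
		}
		fmt.Println("pool 1 has volumes on node", nodeID)
	}

	// Resolve the node name for volume 1, as StorageVolumeNodeGet does.
	var node string
	err = db.QueryRow(`
SELECT nodes.name FROM storage_volumes
  JOIN nodes ON nodes.id=storage_volumes.node_id
 WHERE storage_volumes.id=?`, 1).Scan(&node)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("volume 1 is on", node) // volume 1 is on node1
}
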
From 44f9752de3bd6684de2f634b986a710fea86e029 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Sat, 20 Jan 2018 20:21:44 +0000
Subject: [PATCH 206/227] Support PUT/PATCH for storage pools

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/storage_pools.go       | 137 ++++++++++++++++++++++++++++++++++++++++-----
 lxd/storage_pools_utils.go |   7 ++-
 lxd/util/config.go         |   9 +++
 test/suites/clustering.sh  |   8 ++-
 4 files changed, 143 insertions(+), 18 deletions(-)

diff --git a/lxd/storage_pools.go b/lxd/storage_pools.go
index fd0e003d1..779c3e360 100644
--- a/lxd/storage_pools.go
+++ b/lxd/storage_pools.go
@@ -323,26 +323,63 @@ func storagePoolPut(d *Daemon, r *http.Request) Response {
 		return SmartError(err)
 	}
 
+	req := api.StoragePoolPut{}
+	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+		return BadRequest(err)
+	}
+
+	clustered, err := cluster.Enabled(d.db)
+	if err != nil {
+		return SmartError(err)
+	}
+
+	config := dbInfo.Config
+	if clustered {
+		err := storagePoolValidateClusterConfig(req.Config)
+		if err != nil {
+			return BadRequest(err)
+		}
+		config = storagePoolClusterConfigForEtag(config)
+	}
+
 	// Validate the ETag
-	etag := []interface{}{dbInfo.Name, dbInfo.Driver, dbInfo.Config}
+	etag := []interface{}{dbInfo.Name, dbInfo.Driver, config}
 
 	err = util.EtagCheck(r, etag)
 	if err != nil {
 		return PreconditionFailed(err)
 	}
 
-	req := api.StoragePoolPut{}
-	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
-		return BadRequest(err)
-	}
-
 	// Validate the configuration
 	err = storagePoolValidateConfig(poolName, dbInfo.Driver, req.Config, dbInfo.Config)
 	if err != nil {
 		return BadRequest(err)
 	}
 
-	err = storagePoolUpdate(d.State(), poolName, req.Description, req.Config)
+	config = req.Config
+	if clustered {
+		// For clustered requests, we need to complement the request's config
+		// with our node-specific values.
+		config = storagePoolClusterFillWithNodeConfig(dbInfo.Config, config)
+	}
+
+	// Notify the other nodes, unless this is itself a notification.
+	if clustered && !isClusterNotification(r) {
+		cert := d.endpoints.NetworkCert()
+		notifier, err := cluster.NewNotifier(d.State(), cert, cluster.NotifyAll)
+		if err != nil {
+			return SmartError(err)
+		}
+		err = notifier(func(client lxd.ContainerServer) error {
+			return client.UpdateStoragePool(poolName, req, r.Header.Get("If-Match"))
+		})
+		if err != nil {
+			return SmartError(err)
+		}
+	}
+
+	withDB := !isClusterNotification(r)
+	err = storagePoolUpdate(d.State(), poolName, req.Description, config, withDB)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -361,19 +398,33 @@ func storagePoolPatch(d *Daemon, r *http.Request) Response {
 		return SmartError(err)
 	}
 
+	req := api.StoragePoolPut{}
+	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+		return BadRequest(err)
+	}
+
+	clustered, err := cluster.Enabled(d.db)
+	if err != nil {
+		return SmartError(err)
+	}
+
+	config := dbInfo.Config
+	if clustered {
+		err := storagePoolValidateClusterConfig(req.Config)
+		if err != nil {
+			return BadRequest(err)
+		}
+		config = storagePoolClusterConfigForEtag(config)
+	}
+
 	// Validate the ETag
-	etag := []interface{}{dbInfo.Name, dbInfo.Driver, dbInfo.Config}
+	etag := []interface{}{dbInfo.Name, dbInfo.Driver, config}
 
 	err = util.EtagCheck(r, etag)
 	if err != nil {
 		return PreconditionFailed(err)
 	}
 
-	req := api.StoragePoolPut{}
-	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
-		return BadRequest(err)
-	}
-
 	// Config stacking
 	if req.Config == nil {
 		req.Config = map[string]string{}
@@ -392,7 +443,30 @@ func storagePoolPatch(d *Daemon, r *http.Request) Response {
 		return BadRequest(err)
 	}
 
-	err = storagePoolUpdate(d.State(), poolName, req.Description, req.Config)
+	config = req.Config
+	if clustered {
+		// For clustered requests, we need to complement the request's config
+		// with our node-specific values.
+		config = storagePoolClusterFillWithNodeConfig(dbInfo.Config, config)
+	}
+
+	// Notify the other nodes, unless this is itself a notification.
+	if clustered && !isClusterNotification(r) {
+		cert := d.endpoints.NetworkCert()
+		notifier, err := cluster.NewNotifier(d.State(), cert, cluster.NotifyAll)
+		if err != nil {
+			return SmartError(err)
+		}
+		err = notifier(func(client lxd.ContainerServer) error {
+			return client.UpdateStoragePool(poolName, req, r.Header.Get("If-Match"))
+		})
+		if err != nil {
+			return SmartError(err)
+		}
+	}
+
+	withDB := !isClusterNotification(r)
+	err = storagePoolUpdate(d.State(), poolName, req.Description, config, withDB)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -400,6 +474,41 @@ func storagePoolPatch(d *Daemon, r *http.Request) Response {
 	return EmptySyncResponse
 }
 
+// This helper makes sure that, when clustered, we're not changing
+// node-specific values.
+//
+// POSSIBLY TODO: for now we don't have any node-specific values that can be
+// modified. If we ever get some, we'll need to extend the PUT/PATCH APIs to
+// accept a targetNode query parameter.
+func storagePoolValidateClusterConfig(reqConfig map[string]string) error {
+	for key := range reqConfig {
+		if shared.StringInSlice(key, db.StoragePoolNodeConfigKeys) {
+			return fmt.Errorf("node-specific config key %s can't be changed", key)
+		}
+	}
+	return nil
+}
+
+// This helper deletes any node-specific values from the config object, since
+// they should not be part of the calculated etag.
+func storagePoolClusterConfigForEtag(dbConfig map[string]string) map[string]string {
+	config := util.CopyConfig(dbConfig)
+	for _, key := range db.StoragePoolNodeConfigKeys {
+		delete(config, key)
+	}
+	return config
+}
+
+// This helper complements a PUT/PATCH request config with node-specific values,
+// as taken from the db.
+func storagePoolClusterFillWithNodeConfig(dbConfig, reqConfig map[string]string) map[string]string {
+	config := util.CopyConfig(reqConfig)
+	for _, key := range db.StoragePoolNodeConfigKeys {
+		config[key] = dbConfig[key]
+	}
+	return config
+}
+
 // /1.0/storage-pools/{name}
 // Delete storage pool.
 func storagePoolDelete(d *Daemon, r *http.Request) Response {
diff --git a/lxd/storage_pools_utils.go b/lxd/storage_pools_utils.go
index b8dbf953a..015d09d08 100644
--- a/lxd/storage_pools_utils.go
+++ b/lxd/storage_pools_utils.go
@@ -10,7 +10,7 @@ import (
 	"github.com/lxc/lxd/shared/version"
 )
 
-func storagePoolUpdate(state *state.State, name, newDescription string, newConfig map[string]string) error {
+func storagePoolUpdate(state *state.State, name, newDescription string, newConfig map[string]string, withDB bool) error {
 	s, err := storagePoolInit(state, name)
 	if err != nil {
 		return err
@@ -60,8 +60,9 @@ func storagePoolUpdate(state *state.State, name, newDescription string, newConfi
 		s.SetStoragePoolWritable(&newWritable)
 	}
 
-	// Update the database if something changed
-	if len(changedConfig) != 0 || newDescription != oldDescription {
+	// Update the database if something changed and the withDB flag is true
+	// (i.e. this is not a clustering notification).
+	if withDB && (len(changedConfig) != 0 || newDescription != oldDescription) {
 		err = state.Cluster.StoragePoolUpdate(name, newDescription, newConfig)
 		if err != nil {
 			return err
diff --git a/lxd/util/config.go b/lxd/util/config.go
index 4fb3dd6c8..f8c0969dc 100644
--- a/lxd/util/config.go
+++ b/lxd/util/config.go
@@ -47,3 +47,12 @@ func CompareConfigs(config1, config2 map[string]string, exclude []string) error
 
 	return nil
 }
+
+// CopyConfig creates a new map with a copy of the given config.
+func CopyConfig(config map[string]string) map[string]string {
+	copy := map[string]string{}
+	for key, value := range config {
+		copy[key] = value
+	}
+	return copy
+}
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 1ab572561..08a4f1d9b 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -285,6 +285,12 @@ test_clustering_storage() {
   LXD_DIR="${LXD_ONE_DIR}" lxc storage show pool1 --target node1 | grep source | grep -q "$(basename "${LXD_ONE_DIR}")"
   LXD_DIR="${LXD_ONE_DIR}" lxc storage show pool1 --target node2 | grep source | grep -q "$(basename "${LXD_TWO_DIR}")"
 
+  # Update the storage pool
+  LXD_DIR="${LXD_ONE_DIR}" lxc storage set pool1 rsync.bwlimit 10
+  LXD_DIR="${LXD_TWO_DIR}" lxc storage show pool1 | grep rsync.bwlimit | grep -q 10
+  LXD_DIR="${LXD_TWO_DIR}" lxc storage unset pool1 rsync.bwlimit
+  ! LXD_DIR="${LXD_ONE_DIR}" lxc storage show pool1 | grep -q rsync.bwlimit
+
   # Delete the storage pool
   LXD_DIR="${LXD_ONE_DIR}" lxc storage delete pool1
   ! LXD_DIR="${LXD_ONE_DIR}" lxc storage list | grep -q pool1
@@ -333,7 +339,7 @@ test_clustering_network() {
 
   # Trying to pass config values other than
   # 'bridge.external_interfaces' results in an error
-  ! LXD_DIR="${LXD_ONE_DIR}" lxc storage create network foo ipv4.address=auto --target node1
+  ! LXD_DIR="${LXD_ONE_DIR}" lxc create network foo ipv4.address=auto --target node1
 
   net="${bridge}x"
 

From 89cd4d626be4b2f9c7779a7b864363b33e7245a1 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Sun, 21 Jan 2018 14:18:08 +0000
Subject: [PATCH 207/227] Add StorageVolumeNodeAddresses returning node
 addresses of a volume

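A sketch of the intended usage, mirroring how a later patch in this
series calls it from within a cluster transaction:

    var addresses []string // Node addresses
    err := cluster.Transaction(func(tx *db.ClusterTx) error {
        var err error
        addresses, err = tx.StorageVolumeNodeAddresses(poolID, name, volumeType)
        return err
    })
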
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/storage_volumes.go      | 47 +++++++++++++++++++++++++++++++++++-
 lxd/db/storage_volumes_test.go | 55 ++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 101 insertions(+), 1 deletion(-)
 create mode 100644 lxd/db/storage_volumes_test.go

diff --git a/lxd/db/storage_volumes.go b/lxd/db/storage_volumes.go
index 606a9e8d0..ca7d331e8 100644
--- a/lxd/db/storage_volumes.go
+++ b/lxd/db/storage_volumes.go
@@ -3,11 +3,56 @@ package db
 import (
 	"database/sql"
 	"fmt"
+	"sort"
 
 	"github.com/lxc/lxd/lxd/db/query"
-	_ "github.com/mattn/go-sqlite3"
 )
 
+// StorageVolumeNodeAddresses returns the addresses of all nodes on which the
+// volume with the given name is defined.
+//
+// The empty string is used in place of the address of the current node.
+func (c *ClusterTx) StorageVolumeNodeAddresses(poolID int64, name string, typ int) ([]string, error) {
+	nodes := []struct {
+		id      int64
+		address string
+	}{}
+	dest := func(i int) []interface{} {
+		nodes = append(nodes, struct {
+			id      int64
+			address string
+		}{})
+		return []interface{}{&nodes[i].id, &nodes[i].address}
+	}
+	stmt := `
+SELECT nodes.id, nodes.address
+  FROM nodes JOIN storage_volumes ON storage_volumes.node_id=nodes.id
+    WHERE storage_volumes.storage_pool_id=? AND storage_volumes.name=? AND storage_volumes.type=?
+`
+	err := query.SelectObjects(c.tx, dest, stmt, poolID, name, typ)
+	if err != nil {
+		return nil, err
+	}
+
+	addresses := []string{}
+	for _, node := range nodes {
+		address := node.address
+		if node.id == c.nodeID {
+			address = ""
+		}
+		addresses = append(addresses, address)
+	}
+
+	sort.Strings(addresses)
+
+	if len(addresses) == 0 {
+		return nil, NoSuchObjectError
+	}
+
+	return addresses, nil
+}
+
 // StorageVolumeNodeGet returns the name of the node a storage volume is on.
 func (c *Cluster) StorageVolumeNodeGet(volumeID int64) (string, error) {
 	name := ""
diff --git a/lxd/db/storage_volumes_test.go b/lxd/db/storage_volumes_test.go
new file mode 100644
index 000000000..1ebe5b725
--- /dev/null
+++ b/lxd/db/storage_volumes_test.go
@@ -0,0 +1,55 @@
+package db_test
+
+import (
+	"testing"
+
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// Addresses of all nodes with matching volume name are returned.
+func TestStorageVolumeNodeAddresses(t *testing.T) {
+	tx, cleanup := db.NewTestClusterTx(t)
+	defer cleanup()
+
+	nodeID1 := int64(1) // This is the default local node
+
+	nodeID2, err := tx.NodeAdd("node2", "1.2.3.4:666")
+	require.NoError(t, err)
+
+	nodeID3, err := tx.NodeAdd("node3", "5.6.7.8:666")
+	require.NoError(t, err)
+
+	poolID := addPool(t, tx, "pool1")
+	addVolume(t, tx, poolID, nodeID1, "volume1")
+	addVolume(t, tx, poolID, nodeID2, "volume1")
+	addVolume(t, tx, poolID, nodeID3, "volume2")
+	addVolume(t, tx, poolID, nodeID2, "volume2")
+
+	addresses, err := tx.StorageVolumeNodeAddresses(poolID, "volume1", 1)
+	require.NoError(t, err)
+
+	assert.Equal(t, []string{"", "1.2.3.4:666"}, addresses)
+}
+
+func addPool(t *testing.T, tx *db.ClusterTx, name string) int64 {
+	stmt := `
+INSERT INTO storage_pools(name, driver) VALUES (?, 'dir')
+`
+	result, err := tx.Tx().Exec(stmt, name)
+	require.NoError(t, err)
+
+	id, err := result.LastInsertId()
+	require.NoError(t, err)
+
+	return id
+}
+
+func addVolume(t *testing.T, tx *db.ClusterTx, poolID, nodeID int64, name string) {
+	stmt := `
+INSERT INTO storage_volumes(storage_pool_id, node_id, name, type) VALUES (?, ?, ?, 1)
+`
+	_, err := tx.Tx().Exec(stmt, poolID, nodeID, name)
+	require.NoError(t, err)
+}

From 8a6fa2a09a44aa45dbec17dc32b3a71042389a56 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Sun, 21 Jan 2018 15:39:51 +0000
Subject: [PATCH 208/227] Support lxc storage volume show on a remote node, if
 the match is unique

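For example, if a volume named "web" is defined only on node1, the
following now works from any node, without a --target parameter (as
exercised by the updated test suite):

    lxc storage volume show data web
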
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/connect.go    | 28 ++++++++++++++++++++++++++++
 lxd/response.go           | 16 ++++++++++++++++
 lxd/storage_volumes.go    | 16 ++++++++++++++++
 test/suites/clustering.sh |  4 ++++
 4 files changed, 64 insertions(+)

diff --git a/lxd/cluster/connect.go b/lxd/cluster/connect.go
index d8f58e667..0c2317bf0 100644
--- a/lxd/cluster/connect.go
+++ b/lxd/cluster/connect.go
@@ -49,3 +49,31 @@ func ConnectIfContainerIsRemote(cluster *db.Cluster, name string, cert *shared.C
 	}
 	return Connect(address, cert, false)
 }
+
+// ConnectIfVolumeIsRemote figures out the address of the node on which the
+// volume with the given name is defined. If that node is not the local one,
+// it will connect to it and return the connected client, otherwise it will
+// just return nil.
+//
+// If there is more than one node with a matching volume name, an error is
+// returned.
+func ConnectIfVolumeIsRemote(cluster *db.Cluster, poolID int64, volumeName string, volumeType int, cert *shared.CertInfo) (lxd.ContainerServer, error) {
+	var addresses []string // Node addresses
+	err := cluster.Transaction(func(tx *db.ClusterTx) error {
+		var err error
+		addresses, err = tx.StorageVolumeNodeAddresses(poolID, volumeName, volumeType)
+		return err
+	})
+	if err != nil {
+		return nil, err
+	}
+	if len(addresses) > 1 {
+		return nil, fmt.Errorf("more than one node has a volume named %s", volumeName)
+	}
+
+	address := addresses[0]
+	if address == "" {
+		return nil, nil
+	}
+	return Connect(address, cert, false)
+}
diff --git a/lxd/response.go b/lxd/response.go
index 4db6702b5..3be888cf3 100644
--- a/lxd/response.go
+++ b/lxd/response.go
@@ -174,6 +174,22 @@ func ForwardedResponseIfContainerIsRemote(d *Daemon, r *http.Request, name strin
 	return ForwardedResponse(client, r), nil
 }
 
+// ForwardedResponseIfVolumeIsRemote redirects a request to the node hosting
+// the volume with the given pool ID, name and type. If the volume is local,
+// nothing gets done and nil is returned. If more than one node has a matching
+// volume, an error is returned.
+func ForwardedResponseIfVolumeIsRemote(d *Daemon, r *http.Request, poolID int64, volumeName string, volumeType int) (Response, error) {
+	cert := d.endpoints.NetworkCert()
+	client, err := cluster.ConnectIfVolumeIsRemote(d.cluster, poolID, volumeName, volumeType, cert)
+	if err != nil {
+		return nil, err
+	}
+	if client == nil {
+		return nil, nil
+	}
+	return ForwardedResponse(client, r), nil
+}
+
 // File transfer response
 type fileResponseEntry struct {
 	identifier string
diff --git a/lxd/storage_volumes.go b/lxd/storage_volumes.go
index 247ea2bf5..5cfb2058e 100644
--- a/lxd/storage_volumes.go
+++ b/lxd/storage_volumes.go
@@ -286,6 +286,22 @@ func storagePoolVolumeTypeGet(d *Daemon, r *http.Request) Response {
 		return SmartError(err)
 	}
 
+	targetNode := r.FormValue("targetNode")
+	if targetNode == "" {
+		// If no target node is specified, check if we need to forward
+		// the request to the appropriate node (as long as there's only
+		// one match).
+		response, err := ForwardedResponseIfVolumeIsRemote(d, r, poolID, volumeName, volumeType)
+		if err != nil {
+			return SmartError(err)
+		}
+		if response != nil {
+			return response
+		}
+		// The volume is on the current node, so just go
+		// ahead normally.
+	}
+
 	// Get the storage volume.
 	_, volume, err := d.cluster.StoragePoolNodeVolumeGetType(volumeName, volumeType, poolID)
 	if err != nil {
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 08a4f1d9b..102504943 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -300,6 +300,10 @@ test_clustering_storage() {
   LXD_DIR="${LXD_ONE_DIR}" lxc storage volume list data | grep -q node1
   LXD_DIR="${LXD_TWO_DIR}" lxc storage volume list data | grep -q node1
 
+  # Since the volume name is unique to node1, it's possible to show
+  # the volume without specifying the --target parameter.
+  LXD_DIR="${LXD_TWO_DIR}" lxc storage volume show data web | grep -q "node: node1"
+
   LXD_DIR="${LXD_TWO_DIR}" lxd shutdown
   LXD_DIR="${LXD_ONE_DIR}" lxd shutdown
   sleep 2

From 7a593684f55fddf8c00c65ab15fa2fa502cb90f3 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Sun, 21 Jan 2018 15:57:45 +0000
Subject: [PATCH 209/227] Support --target parameter in lxc storage volume show

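For example (pool, volume and node names as in the test suite):

    lxc storage volume show --target node1 data web
    lxc storage volume show --target node2 data web
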
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 client/lxd_storage_volumes.go |  8 +++++++-
 doc/api-extensions.md         |  1 +
 lxc/storage.go                | 10 ++++++++--
 lxd/response.go               | 42 ++++++++++++++++++++++++++++++++++++++++++
 lxd/storage_volumes.go        |  5 +++++
 test/suites/clustering.sh     | 12 ++++++++++++
 6 files changed, 75 insertions(+), 3 deletions(-)

diff --git a/client/lxd_storage_volumes.go b/client/lxd_storage_volumes.go
index 1aa576e7f..11c9ed8d9 100644
--- a/client/lxd_storage_volumes.go
+++ b/client/lxd_storage_volumes.go
@@ -52,7 +52,13 @@ func (r *ProtocolLXD) GetStoragePoolVolume(pool string, volType string, name str
 	volume := api.StorageVolume{}
 
 	// Fetch the raw value
-	etag, err := r.queryStruct("GET", fmt.Sprintf("/storage-pools/%s/volumes/%s/%s", url.QueryEscape(pool), url.QueryEscape(volType), url.QueryEscape(name)), nil, "", &volume)
+	path := fmt.Sprintf(
+		"/storage-pools/%s/volumes/%s/%s",
+		url.QueryEscape(pool), url.QueryEscape(volType), url.QueryEscape(name))
+	if r.targetNode != "" {
+		path += fmt.Sprintf("?targetNode=%s", r.targetNode)
+	}
+	etag, err := r.queryStruct("GET", path, nil, "", &volume)
 	if err != nil {
 		return nil, "", err
 	}
diff --git a/doc/api-extensions.md b/doc/api-extensions.md
index 9283aab10..be56611d9 100644
--- a/doc/api-extensions.md
+++ b/doc/api-extensions.md
@@ -415,3 +415,4 @@ The following existing endpoints have been modified:
  * `POST /1.0/containers` accepts a new targetNode query parameter
  * `POST /1.0/storage-pools` accepts a new targetNode query parameter
  * `GET /1.0/storage-pool/<name>` accepts a new targetNode query parameter
+ * `GET /1.0/storage-pool/<pool>/volumes/<type>/<name>` accepts a new targetNode query parameter
diff --git a/lxc/storage.go b/lxc/storage.go
index eb8ce9a25..56f930784 100644
--- a/lxc/storage.go
+++ b/lxc/storage.go
@@ -100,8 +100,8 @@ lxc storage edit [<remote>:]<pool>
 lxc storage volume list [<remote>:]<pool>
     List available storage volumes on a storage pool.
 
-lxc storage volume show [<remote>:]<pool> <volume>
-    Show details of a storage volume on a storage pool.
+lxc storage volume show [<remote>:]<pool> <volume> [--target <node>]
+    Show details of a storage volume on a storage pool.
 
 lxc storage volume create [<remote>:]<pool> <volume> [key=value]...
     Create a storage volume on a storage pool.
@@ -1039,6 +1039,12 @@ func (c *storageCmd) doStoragePoolVolumeShow(client lxd.ContainerServer, pool st
 	// Parse the input
 	volName, volType := c.parseVolume(volume)
 
+	// If a target node was specified, get the volume with the matching
+	// name on that node, if any.
+	if c.target != "" {
+		client = client.ClusterTargetNode(c.target)
+	}
+
 	// Get the storage volume entry
 	vol, _, err := client.GetStoragePoolVolume(pool, volType, volName)
 	if err != nil {
diff --git a/lxd/response.go b/lxd/response.go
index 3be888cf3..85a8d5760 100644
--- a/lxd/response.go
+++ b/lxd/response.go
@@ -159,6 +159,48 @@ func ForwardedResponse(client lxd.ContainerServer, request *http.Request) Respon
 	}
 }
 
+// ForwardedResponseIfTargetIsRemote forwards a request to the node indicated
+// by the request's targetNode parameter, if that node is not the local one.
+func ForwardedResponseIfTargetIsRemote(d *Daemon, request *http.Request) Response {
+	targetNode := request.FormValue("targetNode")
+	if targetNode == "" {
+		return nil
+	}
+
+	// Figure out the address of the target node (which is possibly
+	// this very same node).
+	var address string
+	err := d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		localNode, err := tx.NodeName()
+		if err != nil {
+			return err
+		}
+		if targetNode == localNode {
+			return nil
+		}
+		node, err := tx.NodeByName(targetNode)
+		if err != nil {
+			return err
+		}
+		address = node.Address
+		return nil
+	})
+	if err != nil {
+		return SmartError(err)
+	}
+	if address != "" {
+		// Forward the response.
+		cert := d.endpoints.NetworkCert()
+		client, err := cluster.Connect(address, cert, false)
+		if err != nil {
+			return SmartError(err)
+		}
+		return ForwardedResponse(client, request)
+	}
+
+	return nil
+}
+
 // ForwardedResponseIfContainerIsRemote redirects a request to the node running
 // the container with the given name. If the container is local, nothing gets
 // done and nil is returned.
diff --git a/lxd/storage_volumes.go b/lxd/storage_volumes.go
index 5cfb2058e..85bf3b8e9 100644
--- a/lxd/storage_volumes.go
+++ b/lxd/storage_volumes.go
@@ -286,6 +286,11 @@ func storagePoolVolumeTypeGet(d *Daemon, r *http.Request) Response {
 		return SmartError(err)
 	}
 
+	response := ForwardedResponseIfTargetIsRemote(d, r)
+	if response != nil {
+		return response
+	}
+
 	targetNode := r.FormValue("targetNode")
 	if targetNode == "" {
 		// If not target node is specified, check if we need to forward
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 102504943..b9cd08fe3 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -304,6 +304,18 @@ test_clustering_storage() {
   # the volume without specifying the --target parameter.
   LXD_DIR="${LXD_TWO_DIR}" lxc storage volume show data web | grep -q "node: node1"
 
+  # Create another volume on node2 with the same name as the one on
+  # node1.
+  LXD_DIR="${LXD_TWO_DIR}" lxc storage volume create data web
+
+  # Trying to show the web volume without --target fails, because it's
+  # not unique
+  ! LXD_DIR="${LXD_TWO_DIR}" lxc storage volume show data web
+
+  # Specifying the --target parameter gets the proper volume
+  LXD_DIR="${LXD_TWO_DIR}" lxc storage volume show --target node1 data web | grep -q "node: node1"
+  LXD_DIR="${LXD_TWO_DIR}" lxc storage volume show --target node2 data web | grep -q "node: node2"
+
   LXD_DIR="${LXD_TWO_DIR}" lxd shutdown
   LXD_DIR="${LXD_ONE_DIR}" lxd shutdown
   sleep 2

From efa129eba33f81edbc4f9036a756e5db718d7376 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Sun, 21 Jan 2018 16:20:36 +0000
Subject: [PATCH 210/227] Support --target in lxc storage volume create

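For example (pool, volume and node names as in the test suite):

    lxc storage volume create --target node2 data web
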
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 client/lxd_storage_volumes.go | 7 ++++++-
 doc/api-extensions.md         | 1 +
 lxc/storage.go                | 7 ++++++-
 lxd/storage_volumes.go        | 5 +++++
 test/suites/clustering.sh     | 2 +-
 5 files changed, 19 insertions(+), 3 deletions(-)

diff --git a/client/lxd_storage_volumes.go b/client/lxd_storage_volumes.go
index 11c9ed8d9..0be4a4639 100644
--- a/client/lxd_storage_volumes.go
+++ b/client/lxd_storage_volumes.go
@@ -69,7 +69,12 @@ func (r *ProtocolLXD) GetStoragePoolVolume(pool string, volType string, name str
 // CreateStoragePoolVolume defines a new storage volume
 func (r *ProtocolLXD) CreateStoragePoolVolume(pool string, volume api.StorageVolumesPost) error {
 	// Send the request
-	_, _, err := r.query("POST", fmt.Sprintf("/storage-pools/%s/volumes/%s", url.QueryEscape(pool), url.QueryEscape(volume.Type)), volume, "")
+	path := fmt.Sprintf(
+		"/storage-pools/%s/volumes/%s", url.QueryEscape(pool), url.QueryEscape(volume.Type))
+	if r.targetNode != "" {
+		path += fmt.Sprintf("?targetNode=%s", r.targetNode)
+	}
+	_, _, err := r.query("POST", path, volume, "")
 	if err != nil {
 		return err
 	}
diff --git a/doc/api-extensions.md b/doc/api-extensions.md
index be56611d9..84d26c5bc 100644
--- a/doc/api-extensions.md
+++ b/doc/api-extensions.md
@@ -415,4 +415,5 @@ The following existing endpoints have been modified:
  * `POST /1.0/containers` accepts a new targetNode query parameter
  * `POST /1.0/storage-pools` accepts a new targetNode query parameter
  * `GET /1.0/storage-pool/<name>` accepts a new targetNode query parameter
+ * `POST /1.0/storage-pool/<pool>/volumes/<type>` accepts a new targetNode query parameter
  * `GET /1.0/storage-pool/<pool>/volumes/<type>/<name>` accepts a new targetNode query parameter
diff --git a/lxc/storage.go b/lxc/storage.go
index 56f930784..d1e60cbef 100644
--- a/lxc/storage.go
+++ b/lxc/storage.go
@@ -103,7 +103,7 @@ lxc storage volume list [<remote>:]<pool>
 lxc storage volume show [<remote>:]<pool> <volume> [--target <node>]
    Show details of a storage volume on a storage pool.
 
-lxc storage volume create [<remote>:]<pool> <volume> [key=value]...
+lxc storage volume create [<remote>:]<pool> <volume> [key=value]... [--target <node>]
     Create a storage volume on a storage pool.
 
 lxc storage volume rename [<remote>:]<pool> <old name> <new name>
@@ -946,6 +946,11 @@ func (c *storageCmd) doStoragePoolVolumeCreate(client lxd.ContainerServer, pool
 		vol.Config[entry[0]] = entry[1]
 	}
 
+	// If a target was specified, create the volume on the given node.
+	if c.target != "" {
+		client = client.ClusterTargetNode(c.target)
+	}
+
 	err := client.CreateStoragePoolVolume(pool, vol)
 	if err != nil {
 		return err
diff --git a/lxd/storage_volumes.go b/lxd/storage_volumes.go
index 85bf3b8e9..3d885c4a9 100644
--- a/lxd/storage_volumes.go
+++ b/lxd/storage_volumes.go
@@ -142,6 +142,11 @@ func storagePoolVolumesTypeGet(d *Daemon, r *http.Request) Response {
 // /1.0/storage-pools/{name}/volumes/{type}
 // Create a storage volume of a given volume type in a given storage pool.
 func storagePoolVolumesTypePost(d *Daemon, r *http.Request) Response {
+	response := ForwardedResponseIfTargetIsRemote(d, r)
+	if response != nil {
+		return response
+	}
+
 	req := api.StorageVolumesPost{}
 
 	// Parse the request.
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index b9cd08fe3..5d2546f4a 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -306,7 +306,7 @@ test_clustering_storage() {
 
   # Create another volume on node2 with the same name as the one on
   # node1.
-  LXD_DIR="${LXD_TWO_DIR}" lxc storage volume create data web
+  LXD_DIR="${LXD_ONE_DIR}" lxc storage volume create --target node2 data web
 
   # Trying to show the web volume without --target fails, because it's
   # not unique

From 30fd6278e5e2a027c4364e4076329fad315f64df Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 22 Jan 2018 09:17:26 +0000
Subject: [PATCH 211/227] Support --target in lxc storage volume
 rename/edit/get/set/unset/delete

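For example (pool, volume and node names as in the test suite):

    lxc storage volume rename --target node1 data web webbaz
    lxc storage volume delete --target node2 data webbaz
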
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 client/lxd_storage_volumes.go | 24 +++++++++++--
 lxc/storage.go                | 38 +++++++++++++++++----
 lxd/cluster/connect.go        |  1 +
 lxd/response.go               | 15 ++++++---
 lxd/storage_volumes.go        | 78 +++++++++++++++++++++++++++++++++++--------
 test/suites/clustering.sh     | 23 ++++++++++---
 6 files changed, 147 insertions(+), 32 deletions(-)

diff --git a/client/lxd_storage_volumes.go b/client/lxd_storage_volumes.go
index 0be4a4639..da2d158f6 100644
--- a/client/lxd_storage_volumes.go
+++ b/client/lxd_storage_volumes.go
@@ -85,7 +85,13 @@ func (r *ProtocolLXD) CreateStoragePoolVolume(pool string, volume api.StorageVol
 // UpdateStoragePoolVolume updates the volume to match the provided StoragePoolVolume struct
 func (r *ProtocolLXD) UpdateStoragePoolVolume(pool string, volType string, name string, volume api.StorageVolumePut, ETag string) error {
 	// Send the request
-	_, _, err := r.query("PUT", fmt.Sprintf("/storage-pools/%s/volumes/%s/%s", url.QueryEscape(pool), url.QueryEscape(volType), url.QueryEscape(name)), volume, ETag)
+	path := fmt.Sprintf(
+		"/storage-pools/%s/volumes/%s/%s",
+		url.QueryEscape(pool), url.QueryEscape(volType), url.QueryEscape(name))
+	if r.targetNode != "" {
+		path += fmt.Sprintf("?targetNode=%s", r.targetNode)
+	}
+	_, _, err := r.query("PUT", path, volume, ETag)
 	if err != nil {
 		return err
 	}
@@ -96,7 +102,13 @@ func (r *ProtocolLXD) UpdateStoragePoolVolume(pool string, volType string, name
 // DeleteStoragePoolVolume deletes a storage pool
 func (r *ProtocolLXD) DeleteStoragePoolVolume(pool string, volType string, name string) error {
 	// Send the request
-	_, _, err := r.query("DELETE", fmt.Sprintf("/storage-pools/%s/volumes/%s/%s", url.QueryEscape(pool), url.QueryEscape(volType), url.QueryEscape(name)), nil, "")
+	path := fmt.Sprintf(
+		"/storage-pools/%s/volumes/%s/%s",
+		url.QueryEscape(pool), url.QueryEscape(volType), url.QueryEscape(name))
+	if r.targetNode != "" {
+		path += fmt.Sprintf("?targetNode=%s", r.targetNode)
+	}
+	_, _, err := r.query("DELETE", path, nil, "")
 	if err != nil {
 		return err
 	}
@@ -109,9 +121,15 @@ func (r *ProtocolLXD) RenameStoragePoolVolume(pool string, volType string, name
 	if !r.HasExtension("storage_api_volume_rename") {
 		return fmt.Errorf("The server is missing the required \"storage_api_volume_rename\" API extension")
 	}
+	path := fmt.Sprintf(
+		"/storage-pools/%s/volumes/%s/%s",
+		url.QueryEscape(pool), url.QueryEscape(volType), url.QueryEscape(name))
+	if r.targetNode != "" {
+		path += fmt.Sprintf("?targetNode=%s", r.targetNode)
+	}
 
 	// Send the request
-	_, _, err := r.query("POST", fmt.Sprintf("/storage-pools/%s/volumes/%s/%s", url.QueryEscape(pool), url.QueryEscape(volType), url.QueryEscape(name)), volume, "")
+	_, _, err := r.query("POST", path, volume, "")
 	if err != nil {
 		return err
 	}
diff --git a/lxc/storage.go b/lxc/storage.go
index d1e60cbef..2e1752cdd 100644
--- a/lxc/storage.go
+++ b/lxc/storage.go
@@ -106,22 +106,22 @@ lxc storage volume show [<remote>:]<pool> <volume> [--target <node>]
 lxc storage volume create [<remote>:]<pool> <volume> [key=value]... [--target <node>]
     Create a storage volume on a storage pool.
 
-lxc storage volume rename [<remote>:]<pool> <old name> <new name>
+lxc storage volume rename [<remote>:]<pool> <old name> <new name> [--target <node>]
     Rename a storage volume on a storage pool.
 
-lxc storage volume get [<remote>:]<pool> <volume> <key>
+lxc storage volume get [<remote>:]<pool> <volume> <key> [--target <node>]
     Get storage volume configuration on a storage pool.
 
-lxc storage volume set [<remote>:]<pool> <volume> <key> <value>
+lxc storage volume set [<remote>:]<pool> <volume> <key> <value> [--target <node>]
     Set storage volume configuration on a storage pool.
 
-lxc storage volume unset [<remote>:]<pool> <volume> <key>
+lxc storage volume unset [<remote>:]<pool> <volume> <key> [--target <node>]
     Unset storage volume configuration on a storage pool.
 
-lxc storage volume delete [<remote>:]<pool> <volume>
+lxc storage volume delete [<remote>:]<pool> <volume> [--target <node>]
     Delete a storage volume on a storage pool.
 
-lxc storage volume edit [<remote>:]<pool> <volume>
+lxc storage volume edit [<remote>:]<pool> <volume> [--target <node>]
     Edit storage volume, either by launching external editor or reading STDIN.
 
 lxc storage volume attach [<remote>:]<pool> <volume> <container> [device name] <path>
@@ -965,6 +965,11 @@ func (c *storageCmd) doStoragePoolVolumeDelete(client lxd.ContainerServer, pool
 	// Parse the input
 	volName, volType := c.parseVolume(volume)
 
+	// If a target was specified, delete the volume on the given node.
+	if c.target != "" {
+		client = client.ClusterTargetNode(c.target)
+	}
+
 	// Delete the volume
 	err := client.DeleteStoragePoolVolume(pool, volType, volName)
 	if err != nil {
@@ -984,6 +989,11 @@ func (c *storageCmd) doStoragePoolVolumeGet(client lxd.ContainerServer, pool str
 	// Parse input
 	volName, volType := c.parseVolume(volume)
 
+	// If a target was specified, get the volume from the given node.
+	if c.target != "" {
+		client = client.ClusterTargetNode(c.target)
+	}
+
 	// Get the storage volume entry
 	resp, _, err := client.GetStoragePoolVolume(pool, volType, volName)
 	if err != nil {
@@ -1007,6 +1017,11 @@ func (c *storageCmd) doStoragePoolVolumeSet(client lxd.ContainerServer, pool str
 	// Parse the input
 	volName, volType := c.parseVolume(volume)
 
+	// If a target was specified, update the volume on the given node.
+	if c.target != "" {
+		client = client.ClusterTargetNode(c.target)
+	}
+
 	// Get the storage volume entry
 	vol, etag, err := client.GetStoragePoolVolume(pool, volType, volName)
 	if err != nil {
@@ -1088,6 +1103,11 @@ func (c *storageCmd) doStoragePoolVolumeEdit(client lxd.ContainerServer, pool st
 		return client.UpdateStoragePoolVolume(pool, volType, volName, newdata, "")
 	}
 
+	// If a target was specified, edit the volume on the given node.
+	if c.target != "" {
+		client = client.ClusterTargetNode(c.target)
+	}
+
 	// Extract the current value
 	vol, etag, err := client.GetStoragePoolVolume(pool, volType, volName)
 	if err != nil {
@@ -1142,6 +1162,12 @@ func (c *storageCmd) doStoragePoolVolumeRename(client lxd.ContainerServer, pool
 	vol := api.StorageVolumePost{}
 	vol.Name = args[4]
 
+	// If a target node was specified, rename the volume with the matching
+	// name on that node, if any.
+	if c.target != "" {
+		client = client.ClusterTargetNode(c.target)
+	}
+
 	err := client.RenameStoragePoolVolume(pool, volType, volName, vol)
 	if err != nil {
 		return err
diff --git a/lxd/cluster/connect.go b/lxd/cluster/connect.go
index 0c2317bf0..efbb3eb0d 100644
--- a/lxd/cluster/connect.go
+++ b/lxd/cluster/connect.go
@@ -67,6 +67,7 @@ func ConnectIfVolumeIsRemote(cluster *db.Cluster, poolID int64, volumeName strin
 	if err != nil {
 		return nil, err
 	}
+
 	if len(addresses) > 1 {
 		return nil, fmt.Errorf("more than one node has a volume named %s", volumeName)
 	}
diff --git a/lxd/response.go b/lxd/response.go
index 85a8d5760..c2408b445 100644
--- a/lxd/response.go
+++ b/lxd/response.go
@@ -220,16 +220,23 @@ func ForwardedResponseIfContainerIsRemote(d *Daemon, r *http.Request, name strin
 // the volume with the given pool ID, name and type. If the volume is local,
 // nothing gets done and nil is returned. If more than one node has a matching
 // volume, an error is returned.
-func ForwardedResponseIfVolumeIsRemote(d *Daemon, r *http.Request, poolID int64, volumeName string, volumeType int) (Response, error) {
+//
+// This is used when no targetNode is specified, and saves users some typing
+// when the volume name/type is unique to a node.
+func ForwardedResponseIfVolumeIsRemote(d *Daemon, r *http.Request, poolID int64, volumeName string, volumeType int) Response {
+	if r.FormValue("targetNode") != "" {
+		return nil
+	}
+
 	cert := d.endpoints.NetworkCert()
 	client, err := cluster.ConnectIfVolumeIsRemote(d.cluster, poolID, volumeName, volumeType, cert)
 	if err != nil {
-		return nil, err
+		return SmartError(err)
 	}
 	if client == nil {
-		return nil, nil
+		return nil
 	}
-	return ForwardedResponse(client, r), nil
+	return ForwardedResponse(client, r)
 }
 
 // File transfer response
diff --git a/lxd/storage_volumes.go b/lxd/storage_volumes.go
index 3d885c4a9..9e1cfd652 100644
--- a/lxd/storage_volumes.go
+++ b/lxd/storage_volumes.go
@@ -1,6 +1,7 @@
 package main
 
 import (
+	"bytes"
 	"encoding/json"
 	"fmt"
 	"net/http"
@@ -241,6 +242,31 @@ func storagePoolVolumeTypePost(d *Daemon, r *http.Request) Response {
 		return SmartError(err)
 	}
 
+	// We need to restore the body of the request since it has already been
+	// read; without this, a forwarded request would carry an empty body.
+	buf := bytes.Buffer{}
+	err = json.NewEncoder(&buf).Encode(req)
+	if err != nil {
+		return SmartError(err)
+	}
+	r.Body = shared.BytesReadCloser{Buf: &buf}
+
+	response := ForwardedResponseIfTargetIsRemote(d, r)
+	if response != nil {
+		return response
+	}
+
+	// Convert the volume type name to our internal integer representation.
+	volumeType, err := storagePoolVolumeTypeNameToType(volumeTypeName)
+	if err != nil {
+		return BadRequest(err)
+	}
+
+	response = ForwardedResponseIfVolumeIsRemote(d, r, poolID, volumeName, volumeType)
+	if response != nil {
+		return response
+	}
+
 	// Check that the name isn't already in use.
 	_, err = d.cluster.StoragePoolNodeVolumeGetTypeID(req.Name,
 		storagePoolVolumeTypeCustom, poolID)
@@ -296,20 +322,9 @@ func storagePoolVolumeTypeGet(d *Daemon, r *http.Request) Response {
 		return response
 	}
 
-	targetNode := r.FormValue("targetNode")
-	if targetNode == "" {
-		// If no target node is specified, check if we need to forward
-		// the request to the appropriate node (as long as there's only
-		// one match).
-		response, err := ForwardedResponseIfVolumeIsRemote(d, r, poolID, volumeName, volumeType)
-		if err != nil {
-			return SmartError(err)
-		}
-		if response != nil {
-			return response
-		}
-		// The volume is on the current node, so just go
-		// ahead normally.
+	response = ForwardedResponseIfVolumeIsRemote(d, r, poolID, volumeName, volumeType)
+	if response != nil {
+		return response
 	}
 
 	// Get the storage volume.
@@ -356,6 +371,16 @@ func storagePoolVolumeTypePut(d *Daemon, r *http.Request) Response {
 		return SmartError(err)
 	}
 
+	response := ForwardedResponseIfTargetIsRemote(d, r)
+	if response != nil {
+		return response
+	}
+
+	response = ForwardedResponseIfVolumeIsRemote(d, r, poolID, volumeName, volumeType)
+	if response != nil {
+		return response
+	}
+
 	// Get the existing storage volume.
 	_, volume, err := d.cluster.StoragePoolNodeVolumeGetType(volumeName, volumeType, poolID)
 	if err != nil {
@@ -418,6 +443,16 @@ func storagePoolVolumeTypePatch(d *Daemon, r *http.Request) Response {
 		return SmartError(err)
 	}
 
+	response := ForwardedResponseIfTargetIsRemote(d, r)
+	if response != nil {
+		return response
+	}
+
+	response = ForwardedResponseIfVolumeIsRemote(d, r, poolID, volumeName, volumeType)
+	if response != nil {
+		return response
+	}
+
 	// Get the existing storage volume.
 	_, volume, err := d.cluster.StoragePoolNodeVolumeGetType(volumeName, volumeType, poolID)
 	if err != nil {
@@ -484,6 +519,21 @@ func storagePoolVolumeTypeDelete(d *Daemon, r *http.Request) Response {
 		return BadRequest(fmt.Errorf("invalid storage volume type %s", volumeTypeName))
 	}
 
+	response := ForwardedResponseIfTargetIsRemote(d, r)
+	if response != nil {
+		return response
+	}
+
+	poolID, _, err := d.cluster.StoragePoolGet(poolName)
+	if err != nil {
+		return SmartError(err)
+	}
+
+	response = ForwardedResponseIfVolumeIsRemote(d, r, poolID, volumeName, volumeType)
+	if response != nil {
+		return response
+	}
+
 	switch volumeType {
 	case storagePoolVolumeTypeCustom:
 		// allowed
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 5d2546f4a..0f8bf7173 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -300,21 +300,34 @@ test_clustering_storage() {
   LXD_DIR="${LXD_ONE_DIR}" lxc storage volume list data | grep -q node1
   LXD_DIR="${LXD_TWO_DIR}" lxc storage volume list data | grep -q node1
 
-  # Since the volume name is unique to node1, it's possible to show
-  # the volume without specifying the --target parameter.
+  # Since the volume name is unique to node1, it's possible to show, rename,
+  # or get the volume without specifying the --target parameter.
   LXD_DIR="${LXD_TWO_DIR}" lxc storage volume show data web | grep -q "node: node1"
+  LXD_DIR="${LXD_ONE_DIR}" lxc storage volume rename data web webbaz
+  LXD_DIR="${LXD_TWO_DIR}" lxc storage volume rename data webbaz web
+  LXD_DIR="${LXD_TWO_DIR}" lxc storage volume get data web size
 
   # Create another volume on node2 with the same name as the one on
   # node1.
   LXD_DIR="${LXD_ONE_DIR}" lxc storage volume create --target node2 data web
 
-  # Trying to show the web volume without --target fails, because it's
-  # not unique
+  # Trying to show, rename or delete the web volume without --target
+  # fails, because it's not unique.
   ! LXD_DIR="${LXD_TWO_DIR}" lxc storage volume show data web
+  ! LXD_DIR="${LXD_TWO_DIR}" lxc storage volume rename data web webbaz
+  ! LXD_DIR="${LXD_TWO_DIR}" lxc storage volume delete data web
 
-  # Specifying the --target parameter gets the proper volume
+  # Specifying the --target parameter shows, renames and deletes the
+  # proper volume.
   LXD_DIR="${LXD_TWO_DIR}" lxc storage volume show --target node1 data web | grep -q "node: node1"
   LXD_DIR="${LXD_TWO_DIR}" lxc storage volume show --target node2 data web | grep -q "node: node2"
+  LXD_DIR="${LXD_TWO_DIR}" lxc storage volume rename --target node1 data web webbaz
+  LXD_DIR="${LXD_TWO_DIR}" lxc storage volume rename --target node2 data web webbaz
+  LXD_DIR="${LXD_TWO_DIR}" lxc storage volume delete --target node2 data webbaz
+
+  # Since there's now only one volume named webbaz left in the pool,
+  # it's possible to delete it without specifying --target.
+  LXD_DIR="${LXD_TWO_DIR}" lxc storage volume delete data webbaz
 
   LXD_DIR="${LXD_TWO_DIR}" lxd shutdown
   LXD_DIR="${LXD_ONE_DIR}" lxd shutdown

From b878639720a1dc3edcbb306d4ca4d33f9cfd9b9c Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 22 Jan 2018 10:13:10 +0000
Subject: [PATCH 212/227] Fix race condition in heartbeatFixture.Grow when
 adding a node

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/heartbeat_test.go | 22 +++++++++++++++++++---
 1 file changed, 19 insertions(+), 3 deletions(-)

diff --git a/lxd/cluster/heartbeat_test.go b/lxd/cluster/heartbeat_test.go
index f4e3a5368..6b2a8e598 100644
--- a/lxd/cluster/heartbeat_test.go
+++ b/lxd/cluster/heartbeat_test.go
@@ -6,6 +6,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/hashicorp/raft"
 	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/state"
@@ -136,12 +137,27 @@ func (f *heartbeatFixture) Bootstrap() *cluster.Gateway {
 
 // Grow adds a new node to the cluster.
 func (f *heartbeatFixture) Grow() *cluster.Gateway {
-	state, gateway, address := f.node()
-	name := address
+	// Figure out the current leader
+	var target *cluster.Gateway
+	for {
+		for _, gateway := range f.gateways {
+			if gateway.Raft().State() == raft.Leader {
+				target = gateway
+				break
+			}
+		}
+		if target != nil {
+			break
+		}
+		// Wait a bit for election to take place
+		time.Sleep(10 * time.Millisecond)
+	}
 
-	target := f.gateways[0]
 	targetState := f.states[target]
 
+	state, gateway, address := f.node()
+	name := address
+
 	nodes, err := cluster.Accept(
 		targetState, target, name, address, cluster.SchemaVersion, len(version.APIExtensions))
 	require.NoError(f.t, err)

From 4bd02e5e549ce97e49a35a4f0d04a251c241cc6c Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 3 Jan 2018 16:22:52 +0000
Subject: [PATCH 213/227] Clustering documentation

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 doc/clustering.md | 268 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 268 insertions(+)
 create mode 100644 doc/clustering.md

diff --git a/doc/clustering.md b/doc/clustering.md
new file mode 100644
index 000000000..8cb98113f
--- /dev/null
+++ b/doc/clustering.md
@@ -0,0 +1,268 @@
+# Clustering
+
+LXD can be run in clustering mode, where any number of LXD instances
+share the same distributed database and can be managed uniformly using
+the lxc client or the REST API.
+
+Note that this feature was introduced as part of API extension "clustering".
+
+## Forming a cluster
+
+First you need to choose a bootstrap LXD node. It can be an existing
+LXD instance or a brand new one. Then you need to initialize the
+bootstrap node and join further nodes to the cluster. This can be done
+interactively or with a preseed file.
+
+Note that all further nodes joining the cluster must have identical
+configuration to the bootstrap node, in terms of storage pools and
+networks. The only configuration keys that can be node-specific are the
+`source` and `size` keys for storage pools and the
+`bridge.external_interfaces` key for networks.
+
+It is recommended that the number of nodes in the cluster is at least
+three, so the cluster can survive the loss of a node and
+still be able to have a quorum for its distributed state (which is
+kept in a SQLite database replicated using the Raft algorithm).
+
+### Interactively
+
+Run `lxd init` and answer `yes` to the very first question ("Would you
+like to use LXD clustering?"). Then choose a name for identifying the
+node, and an IP or DNS address that other nodes can use to connect to
+it, and answer `no` to the question about whether you're joining an
+existing cluster. Finally, optionally create a storage pool and a
+network bridge. At this point your first cluster node should be up and
+available on your network.
+
+You can now join further nodes to the cluster. Note however that these
+nodes should be brand new LXD instances, or alternatively you should
+wipe them clean before joining, since any existing data on them will be
+lost.
+
+Run `lxd init` and answer `yes` to the question about whether to use
+clustering. Pick a new node name, which must be different from that
+of the bootstrap node or of any other node joined so far. Then pick
+an IP or DNS address for the node and answer `yes` to the question
+about whether you're joining an existing cluster. Pick an address of
+an existing node in the cluster and check the fingerprint that gets
+printed.
+
+### Preseed
+
+Create a preseed file for the bootstrap node with the configuration
+you want, for example:
+
+```yaml
+config:
+  core.trust_password: sekret
+  core.https_address: 10.55.60.171:8443
+  images.auto_update_interval: 15
+storage_pools:
+- name: default
+  driver: dir
+networks:
+- name: lxdbr0
+  type: bridge
+  config:
+    ipv4.address: 192.168.100.14/24
+    ipv6.address: none
+profiles:
+- name: default
+  devices:
+    root:
+      path: /
+      pool: default
+      type: disk
+    eth0:
+      name: eth0
+      nictype: bridged
+      parent: lxdbr0
+      type: nic
+cluster:
+  name: node1
+```
+
+Then run `cat <preseed-file> | lxd init --preseed` and your first node
+should be bootstrapped.
+
+Now create a preseed file for another node. Be sure to specify the
+address and certificate of the target bootstrap node. To create a
+YAML-compatible entry for the `target_cert` key you can use a command like
+`sed ':a;N;$!ba;s/\n/\n\n/g' /var/lib/lxd/server.crt`, which you have to
+run on the bootstrap node.
+
+For example:
+
+```yaml
+config:
+  core.https_address: 10.55.60.155:8443
+  images.auto_update_interval: 15
+storage_pools:
+- name: default
+  driver: dir
+networks:
+- name: lxdbr0
+  type: bridge
+  config:
+    ipv4.address: 192.168.100.14/24
+    ipv6.address: none
+profiles:
+- name: default
+  devices:
+    root:
+      path: /
+      pool: default
+      type: disk
+    eth0:
+      name: eth0
+      nictype: bridged
+      parent: lxdbr0
+      type: nic
+cluster:
+  name: node2
+  target_address: 10.55.60.171:8443
+  target_password: sekret
+  target_cert: "-----BEGIN CERTIFICATE-----
+
+opyQ1VRpAg2sV2C4W8irbNqeUsTeZZxhLqp4vNOXXBBrSqUCdPu1JXADV0kavg1l
+
+2sXYoMobyV3K+RaJgsr1OiHjacGiGCQT3YyNGGY/n5zgT/8xI0Dquvja0bNkaf6f
+
+...
+
+-----END CERTIFICATE-----
+"
+```
+
+## Managing a cluster
+
+Once your cluster is formed you can see a list of its nodes and their
+status by running `lxc cluster list`. More detailed information about
+an individual node is available with `lxc cluster show <node name>`.
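+
+For example, on a cluster whose first node is called node1:
+
+```bash
+lxc cluster list
+lxc cluster show node1
+```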
+
+### Deleting nodes
+
+To cleanly delete a node from the cluster use `lxc cluster delete <node name>`.
+
+### Offline nodes and fault tolerance
+
+At any given time there is an elected cluster leader that monitors
+the health of the other nodes. If a node is down for more than 20
+seconds, its status will be marked as OFFLINE and no operation will be
+possible on it; operations that require state changes
+across all nodes will also fail.
+
+If the node that goes offline is the leader itself, the other nodes
+will elect a new leader.
+
+As soon as the offline node comes back online, operations will be
+available again.
+
+If you can't or don't want to bring the node back online, you can
+delete it from the cluster using `lxc cluster delete --force <node name>`.
+
+### Upgrading nodes
+
+To upgrade a cluster you need to upgrade all its nodes, making sure
+that they all upgrade to the very same LXD version.
+
+To upgrade a single node, simply upgrade the lxd/lxc binaries on the
+host (via snap or other packaging systems) and restart the lxd daemon.
+
+If the new version of the daemon has database schema or API changes,
+the restarted node might transition into a BLOCKED state. That happens
+if there are still nodes in the cluster that have not been upgraded
+and that are running a less recent version. When a node is in the
+BLOCKED state it will not serve any LXD API request (in particular,
+lxc commands on that node will not work, although any running
+container will continue to run).
+
+You can see if some nodes are blocked by running `lxc cluster list` on
+a node which is not blocked.
+
+As you proceed upgrading the rest of the nodes, they will all
+transition to the BLOCKED state, until you upgrade the very last
+one. At that point the blocked nodes will notice that there is no
+out-of-date node left and will become operational again.
+
+## Containers
+
+You can launch a container on any node in the cluster from any node in
+the cluster. For example, from node1:
+
+```bash
+lxc launch --target node2 ubuntu:16.04 xenial
+```
+
+will launch an Ubuntu 16.04 container on node2.
+
+You can list all containers in the cluster with:
+
+```bash
+lxc list
+```
+
+The NODE column will indicate on which node they are running.
+
+After a container is launched, you can operate it from any node. For
+example, from node1:
+
+```bash
+lxc exec xenial ls /
+lxc stop xenial
+lxc delete xenial
+lxc file pull xenial/etc/hosts .
+```
+
+## Storage pools
+
+As mentioned above, all nodes must have identical storage pools. The
+only difference between pools on different nodes might be their
+`source` and `size` configuration keys.
+
+To create a new storage pool, you first have to define it across all
+nodes, for example:
+
+```bash
+lxc storage create --target node1 data zfs source=/dev/vdb1
+lxc storage create --target node2 data zfs source=/dev/vdc1
+```
+
+At this point the pool hasn't actually been created yet, it has only
+been defined (its state shows as PENDING if you run `lxc storage list`).
+
+Now run:
+
+```bash
+lxc storage create data zfs
+```
+
+and the storage pool will actually be created on all nodes. If you didn't
+define it on some node, or some node is down, an error will be
+returned.
+
+## Storage volumes
+
+Each volume lives on a specific node. The `lxc storage volume list`
+output includes a `NODE` column to indicate which node a certain volume
+lives on.
+
+Different volumes can have the same name as long as they live on
+different nodes (for example image volumes). You can manage storage
+volumes in the same way you do in non-clustered deployments, except
+that you'll have to pass a `--target <node name>` parameter to volume
+commands if more than one node has a volume with the given name.
+
+For example:
+
+```bash
+# Create a volume on the node this client is pointing at
+lxc storage volume create default web
+
+# Create a volume with the same name on another node
+lxc storage volume create default web --target node2
+
+# Show the two volumes defined
+lxc storage volume show default web --target node1
+lxc storage volume show default web --target node2
+```

From 28e8efd539df9eb165741e6ad508129ec01dd0c5 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 22 Jan 2018 10:51:16 +0000
Subject: [PATCH 214/227] Skip pending networks when initializing the daemon

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/networks.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lxd/networks.go b/lxd/networks.go
index 3db245e25..5da75d3fe 100644
--- a/lxd/networks.go
+++ b/lxd/networks.go
@@ -616,7 +616,7 @@ func networkLoadByName(s *state.State, name string) (*network, error) {
 
 func networkStartup(s *state.State) error {
 	// Get a list of managed networks
-	networks, err := s.Cluster.Networks()
+	networks, err := s.Cluster.NetworksNotPending()
 	if err != nil {
 		return err
 	}

From dc2cc1d949fc19877bda54815bd9d482f3c41015 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 22 Jan 2018 11:18:15 +0000
Subject: [PATCH 215/227] Add --target parameter to lxc network show

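For example (network and node names illustrative):

    lxc network show lxdbr0 --target node2
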
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 client/lxd_networks.go    |  6 +++++-
 doc/api-extensions.md     |  6 ++++++
 lxc/network.go            | 12 ++++++++++--
 lxd/networks.go           | 18 ++++++++++++++++++
 lxd/storage_pools.go      | 28 +++++++---------------------
 test/suites/clustering.sh |  3 ++-
 6 files changed, 48 insertions(+), 25 deletions(-)

diff --git a/client/lxd_networks.go b/client/lxd_networks.go
index b3a5fbc45..996fd3daa 100644
--- a/client/lxd_networks.go
+++ b/client/lxd_networks.go
@@ -46,7 +46,11 @@ func (r *ProtocolLXD) GetNetwork(name string) (*api.Network, string, error) {
 	network := api.Network{}
 
 	// Fetch the raw value
-	etag, err := r.queryStruct("GET", fmt.Sprintf("/networks/%s", url.QueryEscape(name)), nil, "", &network)
+	path := fmt.Sprintf("/networks/%s", url.QueryEscape(name))
+	if r.targetNode != "" {
+		path += fmt.Sprintf("?targetNode=%s", r.targetNode)
+	}
+	etag, err := r.queryStruct("GET", path, nil, "", &network)
 	if err != nil {
 		return nil, "", err
 	}
diff --git a/doc/api-extensions.md b/doc/api-extensions.md
index 84d26c5bc..0bf513516 100644
--- a/doc/api-extensions.md
+++ b/doc/api-extensions.md
@@ -417,3 +417,9 @@ The following existing endpoints have been modified:
  * `GET /1.0/storage-pool/<name>` accepts a new targetNode query parameter
  * `POST /1.0/storage-pool/<pool>/volumes/<type>` accepts a new targetNode query parameter
  * `GET /1.0/storage-pool/<pool>/volumes/<type>/<name>` accepts a new targetNode query parameter
+ * `POST /1.0/storage-pool/<pool>/volumes/<type>/<name>` accepts a new targetNode query parameter
+ * `PUT /1.0/storage-pool/<pool>/volumes/<type>/<name>` accepts a new targetNode query parameter
+ * `PATCH /1.0/storage-pool/<pool>/volumes/<type>/<name>` accepts a new targetNode query parameter
+ * `DELETE /1.0/storage-pool/<pool>/volumes/<type>/<name>` accepts a new targetNode query parameter
+ * `POST /1.0/networks` accepts a new targetNode query parameter
+ * `GET /1.0/networks/<name>` accepts a new targetNode query parameter
diff --git a/lxc/network.go b/lxc/network.go
index d2c77e587..df9467c67 100644
--- a/lxc/network.go
+++ b/lxc/network.go
@@ -57,13 +57,13 @@ Manage and attach containers to networks.
 lxc network list [<remote>:]
     List available networks.
 
-lxc network show [<remote>:]<network>
+lxc network show [<remote>:]<network> [--target <node>]
     Show details of a network.
 
 lxc network create [<remote>:]<network> [key=value...] [--target <node>]
     Create a network.
 
-lxc network get [<remote>:]<network> <key>
+lxc network get [<remote>:]<network> <key> [--target <node>]
     Get network configuration.
 
 lxc network set [<remote>:]<network> <key> <value>
@@ -472,6 +472,10 @@ func (c *networkCmd) doNetworkGet(client lxd.ContainerServer, name string, args
 		return errArgs
 	}
 
+	if c.target != "" {
+		client = client.ClusterTargetNode(c.target)
+	}
+
 	resp, _, err := client.GetNetwork(name)
 	if err != nil {
 		return err
@@ -595,6 +599,10 @@ func (c *networkCmd) doNetworkShow(client lxd.ContainerServer, name string) erro
 		return errArgs
 	}
 
+	if c.target != "" {
+		client = client.ClusterTargetNode(c.target)
+	}
+
 	network, _, err := client.GetNetwork(name)
 	if err != nil {
 		return err
diff --git a/lxd/networks.go b/lxd/networks.go
index 5da75d3fe..9e950106f 100644
--- a/lxd/networks.go
+++ b/lxd/networks.go
@@ -323,6 +323,12 @@ func doNetworksCreate(d *Daemon, req api.NetworksPost, withDatabase bool) error
 var networksCmd = Command{name: "networks", get: networksGet, post: networksPost}
 
 func networkGet(d *Daemon, r *http.Request) Response {
+	// If a target was specified, forward the request to the relevant node.
+	response := ForwardedResponseIfTargetIsRemote(d, r)
+	if response != nil {
+		return response
+	}
+
 	name := mux.Vars(r)["name"]
 
 	n, err := doNetworkGet(d, name)
@@ -330,6 +336,18 @@ func networkGet(d *Daemon, r *http.Request) Response {
 		return SmartError(err)
 	}
 
+	targetNode := r.FormValue("targetNode")
+	clustered, err := cluster.Enabled(d.db)
+	if err != nil {
+		return SmartError(err)
+	}
+
+	// If no target node is specified and the daemon is clustered, we omit
+	// the node-specific fields.
+	if targetNode == "" && clustered {
+		delete(n.Config, "bridge.external_interfaces")
+	}
+
 	etag := []interface{}{n.Name, n.Managed, n.Type, n.Description, n.Config}
 
 	return SyncResponseETag(true, &n, etag)
diff --git a/lxd/storage_pools.go b/lxd/storage_pools.go
index 779c3e360..eba98f82d 100644
--- a/lxd/storage_pools.go
+++ b/lxd/storage_pools.go
@@ -257,6 +257,12 @@ var storagePoolsCmd = Command{name: "storage-pools", get: storagePoolsGet, post:
 // /1.0/storage-pools/{name}
 // Get a single storage pool.
 func storagePoolGet(d *Daemon, r *http.Request) Response {
+	// If a target was specified, forward the request to the relevant node.
+	response := ForwardedResponseIfTargetIsRemote(d, r)
+	if response != nil {
+		return response
+	}
+
 	poolName := mux.Vars(r)["name"]
 
 	// Get the existing storage pool.
@@ -279,7 +285,7 @@ func storagePoolGet(d *Daemon, r *http.Request) Response {
 		return SmartError(err)
 	}
 
-	// If no target node is specified and the client is clustered, we omit
+	// If no target node is specified and the daemon is clustered, we omit
 	// the node-specific fields.
 	if targetNode == "" && clustered {
 		for _, key := range db.StoragePoolNodeConfigKeys {
@@ -287,26 +293,6 @@ func storagePoolGet(d *Daemon, r *http.Request) Response {
 		}
 	}
 
-	// If a target was specified, forward the request to the relevant node.
-	if targetNode != "" {
-		address, err := cluster.ResolveTarget(d.cluster, targetNode)
-		if err != nil {
-			return SmartError(err)
-		}
-		if address != "" {
-			cert := d.endpoints.NetworkCert()
-			client, err := cluster.Connect(address, cert, true)
-			if err != nil {
-				return SmartError(err)
-			}
-			client = client.ClusterTargetNode(targetNode)
-			pool, _, err = client.GetStoragePool(poolName)
-			if err != nil {
-				return SmartError(err)
-			}
-		}
-	}
-
 	etag := []interface{}{pool.Name, pool.Driver, pool.Config}
 
 	return SyncResponseETag(true, &pool, etag)
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 0f8bf7173..d56b2117c 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -368,7 +368,7 @@ test_clustering_network() {
 
   # Trying to pass config values other than
   # 'bridge.external_interfaces' results in an error
-  ! LXD_DIR="${LXD_ONE_DIR}" lxc create network foo ipv4.address=auto --target node1
+  ! LXD_DIR="${LXD_ONE_DIR}" lxc network create foo ipv4.address=auto --target node1
 
   net="${bridge}x"
 
@@ -386,6 +386,7 @@ test_clustering_network() {
   # Create the network
   LXD_DIR="${LXD_TWO_DIR}" lxc network create "${net}"
   LXD_DIR="${LXD_ONE_DIR}" lxc network show "${net}" | grep state: | grep -q CREATED
+  LXD_DIR="${LXD_ONE_DIR}" lxc network show "${net}" --target node2 | grep state: | grep -q CREATED
 
   # Delete the networks
   LXD_DIR="${LXD_TWO_DIR}" lxc network delete "${net}"
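
The refactoring above replaces per-handler forwarding code with a single
ForwardedResponseIfTargetIsRemote helper. For readers following along, the
inline logic deleted from storagePoolGet suggests the helper looks roughly
like the sketch below; this is an illustration reconstructed from the
removed code, not the exact implementation in the branch (in particular the
final ForwardedResponse relay call is assumed):

    // Sketch: forward the request to the node named by the "targetNode"
    // query parameter, or return nil if it should be handled locally.
    func ForwardedResponseIfTargetIsRemote(d *Daemon, r *http.Request) Response {
        targetNode := r.FormValue("targetNode")
        if targetNode == "" {
            return nil
        }

        // Resolve the address of the target node (empty if it's us).
        address, err := cluster.ResolveTarget(d.cluster, targetNode)
        if err != nil {
            return SmartError(err)
        }
        if address == "" {
            return nil
        }

        // Connect to the target node and relay the original request.
        cert := d.endpoints.NetworkCert()
        client, err := cluster.Connect(address, cert, true)
        if err != nil {
            return SmartError(err)
        }
        return ForwardedResponse(client, r) // assumed relay helper
    }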

From 6e7ea15242b91c4e17201c2c8c612f97e1f7d1b4 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 22 Jan 2018 12:22:41 +0000
Subject: [PATCH 216/227] Add error message when trying to rename a network in
 a cluster

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/networks.go           | 38 ++++++++++++++++++++++++--------------
 test/suites/clustering.sh |  3 +++
 2 files changed, 27 insertions(+), 14 deletions(-)

diff --git a/lxd/networks.go b/lxd/networks.go
index 9e950106f..50cef6645 100644
--- a/lxd/networks.go
+++ b/lxd/networks.go
@@ -310,10 +310,7 @@ func doNetworksCreate(d *Daemon, req api.NetworksPost, withDatabase bool) error
 
 	err = n.Start()
 	if err != nil {
-		if !withDatabase {
-			n.state = nil
-		}
-		n.Delete()
+		n.Delete(withDatabase)
 		return err
 	}
 
@@ -444,17 +441,15 @@ func networkDelete(d *Daemon, r *http.Request) Response {
 		return NotFound
 	}
 
+	withDatabase := true
 	if isClusterNotification(r) {
-		n.state = nil // We just want to delete the network from the system
+		withDatabase = false // We just want to delete the network from the system
 	} else {
 		// Sanity checks
 		if n.IsUsed() {
 			return BadRequest(fmt.Errorf("The network is currently in use"))
 		}
-	}
 
-	// If we're just handling a notification, we're done.
-	if n.state != nil {
 		// Notify all other nodes. If any node is down, an error will be returned.
 		notifier, err := cluster.NewNotifier(d.State(), d.endpoints.NetworkCert(), cluster.NotifyAll)
 		if err != nil {
@@ -469,7 +464,7 @@ func networkDelete(d *Daemon, r *http.Request) Response {
 	}
 
 	// Delete the network
-	err = n.Delete()
+	err = n.Delete(withDatabase)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -483,12 +478,27 @@ func networkDelete(d *Daemon, r *http.Request) Response {
 }
 
 func networkPost(d *Daemon, r *http.Request) Response {
+	// FIXME: renaming a network is currently not supported in clustering
+	//        mode. The difficulty is that network.Start() depends on the
+	//        network having already been renamed in the database, which is
+	//        a chicken-and-egg problem for cluster notifications (the
+	//        serving node should typically do the database job, so the
+	//        network is not yet renamed in the db when the notified node
+	//        runs network.Start).
+	clustered, err := cluster.Enabled(d.db)
+	if err != nil {
+		return SmartError(err)
+	}
+	if clustered {
+		return BadRequest(fmt.Errorf("Renaming a network is not supported in LXD clusters"))
+	}
+
 	name := mux.Vars(r)["name"]
 	req := api.NetworkPost{}
 	state := d.State()
 
 	// Parse the request
-	err := json.NewDecoder(r.Body).Decode(&req)
+	err = json.NewDecoder(r.Body).Decode(&req)
 	if err != nil {
 		return BadRequest(err)
 	}
@@ -724,7 +734,7 @@ func (n *network) IsUsed() bool {
 	return false
 }
 
-func (n *network) Delete() error {
+func (n *network) Delete(withDatabase bool) error {
 	// Bring the network down
 	if n.IsRunning() {
 		err := n.Stop()
@@ -733,9 +743,9 @@ func (n *network) Delete() error {
 		}
 	}
 
-	// If state is nil, this is a cluster notification, and we don't want
-	// to perform any database work.
-	if n.state == nil {
+	// If withDatabase is false, this is a cluster notification, and we
+	// don't want to perform any database work.
+	if !withDatabase {
 		return nil
 	}
 
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index d56b2117c..003d4ccbf 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -388,6 +388,9 @@ test_clustering_network() {
   LXD_DIR="${LXD_ONE_DIR}" lxc network show "${net}" | grep state: | grep -q CREATED
   LXD_DIR="${LXD_ONE_DIR}" lxc network show "${net}" --target node2 | grep state: | grep -q CREATED
 
+  # FIXME: renaming the network is not supported with clustering
+  ! LXD_DIR="${LXD_TWO_DIR}" lxc network rename "${net}" "${net}-foo"
+
   # Delete the networks
   LXD_DIR="${LXD_TWO_DIR}" lxc network delete "${net}"
   LXD_DIR="${LXD_TWO_DIR}" lxc network delete "${bridge}"
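
Replacing the implicit "n.state == nil" convention with an explicit
withDatabase flag makes the division of labour easier to follow: the node
serving the client request performs both the system-level and the database
work, while nodes reacting to a cluster notification only touch the local
system. The resulting flow, condensed from the handler above with error
paths trimmed (the client.DeleteNetwork call is assumed to be what carries
the notification):

    // Sketch of networkDelete after this patch.
    withDatabase := true
    if isClusterNotification(r) {
        // Another node owns the database work for this request.
        withDatabase = false
    } else {
        // Notify every other node so they delete the network locally.
        notifier, err := cluster.NewNotifier(d.State(), d.endpoints.NetworkCert(), cluster.NotifyAll)
        if err != nil {
            return SmartError(err)
        }
        err = notifier(func(client lxd.ContainerServer) error {
            return client.DeleteNetwork(name)
        })
        if err != nil {
            return SmartError(err)
        }
    }
    err = n.Delete(withDatabase) // database rows removed only when true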

From f3e1941d9e26f9ae1b394fdd7d8a92ca132eda60 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 22 Jan 2018 12:38:41 +0000
Subject: [PATCH 217/227] Add NetworkNodeConfigKeys listing node-specific
 network config keys

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_cluster.go        |  4 ++--
 lxd/cluster/membership.go | 11 +++++++----
 lxd/db/migration.go       | 14 ++++++++++----
 lxd/db/networks.go        |  7 ++++++-
 lxd/networks.go           | 13 +++++++------
 5 files changed, 32 insertions(+), 17 deletions(-)

diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index d0c2bf670..195747ff2 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -292,8 +292,8 @@ func clusterCheckNetworksMatch(cluster *db.Cluster, reqNetworks []api.Network) e
 			if err != nil {
 				return err
 			}
-			// Exclude the "bridge.external_interfaces" key, which is node-specific.
-			exclude := []string{"bridge.external_interfaces"}
+			// Exclude the keys which are node-specific.
+			exclude := db.NetworkNodeConfigKeys
 			err = util.CompareConfigs(network.Config, reqNetwork.Config, exclude)
 			if err != nil {
 				return fmt.Errorf("Mismatching config for network %s: %v", name, err)
diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index c2e946313..f874b79cb 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -360,10 +360,13 @@ func Join(state *state.State, gateway *Gateway, cert *shared.CertInfo, name stri
 			if err != nil {
 				return errors.Wrap(err, "failed to add joining node to the network")
 			}
-			// We only need to add the bridge.external_interfaces
-			// key, since the other keys are global and are already
-			// there.
-			config = map[string]string{"bridge.external_interfaces": config["bridge.external_interfaces"]}
+			// We only need to add the node-specific keys, since
+			// the other keys are global and are already there.
+			for key := range config {
+				if !shared.StringInSlice(key, db.NetworkNodeConfigKeys) {
+					delete(config, key)
+				}
+			}
 			err = tx.NetworkConfigAdd(id, node.ID, config)
 			if err != nil {
 				return errors.Wrap(err, "failed to add joining node's network config")
diff --git a/lxd/db/migration.go b/lxd/db/migration.go
index 6f46e037d..7c686c57b 100644
--- a/lxd/db/migration.go
+++ b/lxd/db/migration.go
@@ -115,16 +115,22 @@ func (c *Cluster) ImportPreClusteringData(dump *Dump) error {
 			case "containers":
 				appendNodeID()
 			case "networks_config":
-				// The "bridge.external_interfaces" config key
-				// is the only one which is not global to the
+				// The keys listed in NetworkNodeConfigKeys
+				// are the only ones which are not global to the
 				// cluster, so all other keys will have a NULL
 				// node_id.
+				index := 0
 				for i, column := range columns {
-					if column == "key" && row[i] != "bridge.external_interfaces" {
-						nullNodeID = true
+					if column == "key" {
+						index = i
 						break
 					}
 				}
+				key := row[index].(string)
+				if !shared.StringInSlice(key, NetworkNodeConfigKeys) {
+					nullNodeID = true
+					break
+				}
 				appendNodeID()
 			case "storage_pools_config":
 				// The keys listed in StoragePoolNodeConfigKeys
diff --git a/lxd/db/networks.go b/lxd/db/networks.go
index 794f1f8f0..4164cdea7 100644
--- a/lxd/db/networks.go
+++ b/lxd/db/networks.go
@@ -526,7 +526,7 @@ func networkConfigAdd(tx *sql.Tx, networkID, nodeID int64, config map[string]str
 			continue
 		}
 		var nodeIDValue interface{}
-		if k != "bridge.external_interfaces" {
+		if !shared.StringInSlice(k, NetworkNodeConfigKeys) {
 			nodeIDValue = nil
 		} else {
 			nodeIDValue = nodeID
@@ -585,3 +585,8 @@ func (c *Cluster) NetworkRename(oldName string, newName string) error {
 
 	return TxCommit(tx)
 }
+
+// NetworkNodeConfigKeys lists all network config keys which are node-specific.
+var NetworkNodeConfigKeys = []string{
+	"bridge.external_interfaces",
+}
diff --git a/lxd/networks.go b/lxd/networks.go
index 50cef6645..731454583 100644
--- a/lxd/networks.go
+++ b/lxd/networks.go
@@ -119,7 +119,7 @@ func networksPost(d *Daemon, r *http.Request) Response {
 		// network without actually creating it. The only legal keys
 		// are the node-specific ones listed in db.NetworkNodeConfigKeys.
 		for key := range req.Config {
-			if key != "bridge.external_interfaces" {
+			if !shared.StringInSlice(key, db.NetworkNodeConfigKeys) {
 				return SmartError(fmt.Errorf("Invalid config key '%s'", key))
 			}
 		}
@@ -182,11 +182,10 @@ func networksPost(d *Daemon, r *http.Request) Response {
 }
 
 func networksPostCluster(d *Daemon, req api.NetworksPost) error {
-	// Check that no 'bridge.external_interfaces' config key has been
-	// defined, since that's node-specific.
+	// Check that no node-specific config key has been defined.
 	for key := range req.Config {
-		if key == "bridge.external_interfaces" {
-			return fmt.Errorf("Config key 'bridge.external_interfaces' is node-specific")
+		if shared.StringInSlice(key, db.NetworkNodeConfigKeys) {
+			return fmt.Errorf("Config key '%s' is node-specific", key)
 		}
 	}
 
@@ -342,7 +341,9 @@ func networkGet(d *Daemon, r *http.Request) Response {
 	// If no target node is specified and the daemon is clustered, we omit
 	// the node-specific fields.
 	if targetNode == "" && clustered {
-		delete(n.Config, "bridge.external_interfaces")
+		for _, key := range db.NetworkNodeConfigKeys {
+			delete(n.Config, key)
+		}
 	}
 
 	etag := []interface{}{n.Name, n.Managed, n.Type, n.Description, n.Config}
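
With the node-specific key names centralized in db.NetworkNodeConfigKeys,
the membership test used at all the call sites above (creation-time
validation, join-time config copying, pre-clustering migration and the GET
handler) is the same shared.StringInSlice check. A small hypothetical
helper makes the intended split explicit; it is not part of the patch, just
an illustration of how the list is meant to be used:

    // splitNetworkConfig partitions a network config map into its
    // node-specific and cluster-global parts, using the key list
    // introduced by this patch.
    func splitNetworkConfig(config map[string]string) (nodeSpecific, global map[string]string) {
        nodeSpecific = map[string]string{}
        global = map[string]string{}
        for key, value := range config {
            if shared.StringInSlice(key, db.NetworkNodeConfigKeys) {
                nodeSpecific[key] = value
            } else {
                global[key] = value
            }
        }
        return nodeSpecific, global
    }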

From 938eebc527c80bf4f7c9ad58b2d8af402652cb5e Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 9 Jan 2018 12:43:32 +0000
Subject: [PATCH 218/227] Regenerate i18n strings

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 po/de.po      | 346 +++++++++++++++++++++++++++++++++-----------------------
 po/el.po      | 346 +++++++++++++++++++++++++++++++++-----------------------
 po/es.po      | 346 +++++++++++++++++++++++++++++++++-----------------------
 po/fi.po      | 346 +++++++++++++++++++++++++++++++++-----------------------
 po/fr.po      | 350 ++++++++++++++++++++++++++++++++++-----------------------
 po/id.po      | 346 +++++++++++++++++++++++++++++++++-----------------------
 po/it.po      | 346 +++++++++++++++++++++++++++++++++-----------------------
 po/ja.po      | 353 +++++++++++++++++++++++++++++++++++-----------------------
 po/lxd.pot    | 335 +++++++++++++++++++++++++++++++++----------------------
 po/nb_NO.po   | 346 +++++++++++++++++++++++++++++++++-----------------------
 po/nl.po      | 346 +++++++++++++++++++++++++++++++++-----------------------
 po/pl.po      | 346 +++++++++++++++++++++++++++++++++-----------------------
 po/pt_BR.po   | 346 +++++++++++++++++++++++++++++++++-----------------------
 po/ru.po      | 346 +++++++++++++++++++++++++++++++++-----------------------
 po/sr.po      | 346 +++++++++++++++++++++++++++++++++-----------------------
 po/sv.po      | 346 +++++++++++++++++++++++++++++++++-----------------------
 po/tr.po      | 346 +++++++++++++++++++++++++++++++++-----------------------
 po/zh.po      | 346 +++++++++++++++++++++++++++++++++-----------------------
 po/zh_Hans.po | 346 +++++++++++++++++++++++++++++++++-----------------------
 19 files changed, 3971 insertions(+), 2603 deletions(-)

diff --git a/po/de.po b/po/de.po
index d0f7f4882..6541ddb17 100644
--- a/po/de.po
+++ b/po/de.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: LXD\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2018-01-21 21:48+0000\n"
+"POT-Creation-Date: 2018-01-24 13:34+0000\n"
 "PO-Revision-Date: 2017-02-14 17:11+0000\n"
 "Last-Translator: Tim Rose <tim at netlope.de>\n"
 "Language-Team: German <https://hosted.weblate.org/projects/linux-containers/"
@@ -19,7 +19,7 @@ msgstr ""
 "Plural-Forms: nplurals=2; plural=n != 1;\n"
 "X-Generator: Weblate 2.14-dev\n"
 
-#: lxc/storage.go:34
+#: lxc/storage.go:35
 #, fuzzy
 msgid ""
 "### This is a yaml representation of a storage pool.\n"
@@ -54,7 +54,7 @@ msgstr ""
 "###\n"
 "### Der Name wird zwar angezeigt, lässt sich jedoch nicht ändern.\n"
 
-#: lxc/storage.go:51
+#: lxc/storage.go:52
 #, fuzzy
 msgid ""
 "### This is a yaml representation of a storage volume.\n"
@@ -173,7 +173,7 @@ msgstr ""
 "### Zum Beispiel:\n"
 "###  description: Mein eigenes Abbild\n"
 
-#: lxc/network.go:30
+#: lxc/network.go:32
 #, fuzzy
 msgid ""
 "### This is a yaml representation of the network.\n"
@@ -260,7 +260,7 @@ msgstr ""
 msgid "%s is not a directory"
 msgstr ""
 
-#: lxc/utils.go:375
+#: lxc/utils.go:376
 #, c-format
 msgid "%v (interrupt two more times to force)"
 msgstr ""
@@ -286,7 +286,7 @@ msgstr ""
 msgid "ARCH"
 msgstr ""
 
-#: lxc/list.go:463
+#: lxc/list.go:467
 msgid "ARCHITECTURE"
 msgstr ""
 
@@ -312,7 +312,7 @@ msgstr "Administrator Passwort für %s: "
 msgid "Aliases:"
 msgstr "Aliasse:\n"
 
-#: lxc/image.go:600 lxc/info.go:122
+#: lxc/image.go:600 lxc/info.go:125
 #, fuzzy, c-format
 msgid "Architecture: %s"
 msgstr "Architektur: %s\n"
@@ -336,11 +336,11 @@ msgstr "Ungültige Abbild Eigenschaft: %s\n"
 msgid "Both --all and container name given"
 msgstr ""
 
-#: lxc/info.go:215
+#: lxc/info.go:218
 msgid "Bytes received"
 msgstr "Bytes empfangen"
 
-#: lxc/info.go:216
+#: lxc/info.go:219
 msgid "Bytes sent"
 msgstr "Bytes gesendet"
 
@@ -352,11 +352,11 @@ msgstr ""
 msgid "COMMON NAME"
 msgstr ""
 
-#: lxc/info.go:179
+#: lxc/info.go:182
 msgid "CPU usage (in seconds)"
 msgstr ""
 
-#: lxc/info.go:183
+#: lxc/info.go:186
 #, fuzzy
 msgid "CPU usage:"
 msgstr " Prozessorauslastung:"
@@ -366,7 +366,7 @@ msgstr " Prozessorauslastung:"
 msgid "CREATED"
 msgstr "ERSTELLT AM"
 
-#: lxc/list.go:464
+#: lxc/list.go:468
 msgid "CREATED AT"
 msgstr "ERSTELLT AM"
 
@@ -383,7 +383,7 @@ msgstr ""
 msgid "Can't pull a directory without --recursive"
 msgstr ""
 
-#: lxc/config.go:207 lxc/network.go:560
+#: lxc/config.go:207 lxc/network.go:587
 #, c-format
 msgid "Can't read from stdin: %s"
 msgstr ""
@@ -402,7 +402,7 @@ msgstr ""
 msgid "Can't unset key '%s', it's not currently set."
 msgstr ""
 
-#: lxc/profile.go:546 lxc/storage.go:663
+#: lxc/profile.go:546 lxc/storage.go:675
 msgid "Cannot provide container name to list"
 msgstr ""
 
@@ -415,7 +415,7 @@ msgstr "Fingerabdruck des Zertifikats: % x\n"
 msgid "Client certificate stored at server: "
 msgstr "Gespeichertes Nutzerzertifikat auf dem Server: "
 
-#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:130 lxc/list.go:131
+#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:134 lxc/list.go:135
 msgid "Columns"
 msgstr ""
 
@@ -423,13 +423,13 @@ msgstr ""
 msgid "Commands:"
 msgstr ""
 
-#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:137 lxc/init.go:138
+#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:138 lxc/init.go:139
 #, fuzzy
 msgid "Config key/value to apply to the new container"
 msgstr "kann nicht zum selben Container Namen kopieren"
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:614 lxc/storage.go:1067
+#: lxc/network.go:440 lxc/profile.go:275 lxc/storage.go:626 lxc/storage.go:1138
 #, fuzzy, c-format
 msgid "Config parsing error: %s"
 msgstr "YAML Analyse Fehler %v\n"
@@ -446,7 +446,7 @@ msgstr ""
 msgid "Container name is mandatory"
 msgstr ""
 
-#: lxc/copy.go:229 lxc/init.go:317
+#: lxc/copy.go:229 lxc/init.go:320
 #, c-format
 msgid "Container name is: %s"
 msgstr ""
@@ -482,27 +482,31 @@ msgstr "Kann Verzeichnis für Zertifikate auf dem Server nicht erstellen"
 msgid "Create any directories necessary"
 msgstr ""
 
-#: lxc/image.go:605 lxc/info.go:124
+#: lxc/image.go:605 lxc/info.go:127
 #, c-format
 msgid "Created: %s"
 msgstr ""
 
-#: lxc/init.go:196
+#: lxc/init.go:199
 #, c-format
 msgid "Creating %s"
 msgstr ""
 
-#: lxc/init.go:194
+#: lxc/init.go:197
 #, fuzzy
 msgid "Creating the container"
 msgstr "kann nicht zum selben Container Namen kopieren"
 
-#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:465 lxc/network.go:525
-#: lxc/storage.go:692 lxc/storage.go:881
+#: lxc/cluster.go:184
+msgid "DATABASE"
+msgstr ""
+
+#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:469 lxc/network.go:547
+#: lxc/storage.go:711 lxc/storage.go:916
 msgid "DESCRIPTION"
 msgstr ""
 
-#: lxc/storage.go:693
+#: lxc/storage.go:712
 msgid "DRIVER"
 msgstr ""
 
@@ -520,7 +524,7 @@ msgstr "Gerät %s wurde zu %s hinzugefügt\n"
 msgid "Device %s removed from %s"
 msgstr "Gerät %s wurde von %s entfernt\n"
 
-#: lxc/utils.go:307 lxc/utils.go:331
+#: lxc/utils.go:308 lxc/utils.go:332
 #, fuzzy, c-format
 msgid "Device already exists: %s"
 msgstr "entfernte Instanz %s existiert bereits"
@@ -537,12 +541,12 @@ msgstr ""
 msgid "Disable stdin (reads from /dev/null)"
 msgstr ""
 
-#: lxc/info.go:172
+#: lxc/info.go:175
 #, fuzzy
 msgid "Disk usage:"
 msgstr " Prozessorauslastung:"
 
-#: lxc/list.go:622
+#: lxc/list.go:638
 msgid "EPHEMERAL"
 msgstr ""
 
@@ -568,7 +572,7 @@ msgstr ""
 msgid "Environment:"
 msgstr ""
 
-#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:141 lxc/init.go:142
+#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:142 lxc/init.go:143
 msgid "Ephemeral container"
 msgstr "Flüchtiger Container"
 
@@ -603,7 +607,7 @@ msgstr ""
 msgid "FINGERPRINT"
 msgstr ""
 
-#: lxc/utils.go:413
+#: lxc/utils.go:414
 #, c-format
 msgid "Failed to create alias %s"
 msgstr ""
@@ -623,7 +627,7 @@ msgstr ""
 msgid "Failed to get the new container name"
 msgstr "kann nicht zum selben Container Namen kopieren"
 
-#: lxc/utils.go:403
+#: lxc/utils.go:404
 #, c-format
 msgid "Failed to remove alias %s"
 msgstr ""
@@ -633,11 +637,11 @@ msgstr ""
 msgid "Failed to walk path for %s: %s"
 msgstr ""
 
-#: lxc/list.go:133
+#: lxc/list.go:137
 msgid "Fast mode (same as --columns=nsacPt)"
 msgstr ""
 
-#: lxc/network.go:486 lxc/operation.go:121
+#: lxc/network.go:504 lxc/operation.go:121
 #, fuzzy
 msgid "Filtering isn't supported yet"
 msgstr ""
@@ -653,6 +657,10 @@ msgstr "Fingerabdruck: %s\n"
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
+#: lxc/cluster.go:41
+msgid "Force removing a node, even if degraded"
+msgstr ""
+
 #: lxc/action.go:47 lxc/action.go:48
 #, fuzzy
 msgid "Force the container to shutdown"
@@ -666,7 +674,7 @@ msgstr ""
 msgid "Force using the local unix socket"
 msgstr ""
 
-#: lxc/image.go:180 lxc/list.go:132
+#: lxc/image.go:180 lxc/list.go:136
 msgid "Format (csv|json|table|yaml)"
 msgstr ""
 
@@ -679,11 +687,11 @@ msgstr "Generiere Nutzerzertifikat. Dies kann wenige Minuten dauern...\n"
 msgid "ID"
 msgstr ""
 
-#: lxc/list.go:461
+#: lxc/list.go:465
 msgid "IPV4"
 msgstr ""
 
-#: lxc/list.go:462
+#: lxc/list.go:466
 msgid "IPV6"
 msgstr ""
 
@@ -731,7 +739,7 @@ msgstr ""
 msgid "Input data"
 msgstr ""
 
-#: lxc/init.go:147
+#: lxc/init.go:148
 msgid "Instance type"
 msgstr ""
 
@@ -769,7 +777,7 @@ msgstr "Ungültige Quelle %s"
 msgid "Invalid target %s"
 msgstr "Ungültiges Ziel %s"
 
-#: lxc/info.go:153
+#: lxc/info.go:156
 msgid "Ips:"
 msgstr ""
 
@@ -777,7 +785,7 @@ msgstr ""
 msgid "Keep the image up to date after initial copy"
 msgstr ""
 
-#: lxc/list.go:466
+#: lxc/list.go:470
 msgid "LAST USED AT"
 msgstr ""
 
@@ -794,14 +802,18 @@ msgstr ""
 msgid "Last used: never"
 msgstr ""
 
-#: lxc/info.go:268
+#: lxc/info.go:271
 msgid "Log:"
 msgstr ""
 
-#: lxc/network.go:524
+#: lxc/network.go:546
 msgid "MANAGED"
 msgstr ""
 
+#: lxc/cluster.go:186
+msgid "MESSAGE"
+msgstr ""
+
 #: lxc/image.go:176
 msgid "Make image public"
 msgstr "Veröffentliche Abbild"
@@ -811,23 +823,23 @@ msgstr "Veröffentliche Abbild"
 msgid "Make the image public"
 msgstr "Veröffentliche Abbild"
 
-#: lxc/info.go:190
+#: lxc/info.go:193
 msgid "Memory (current)"
 msgstr ""
 
-#: lxc/info.go:194
+#: lxc/info.go:197
 msgid "Memory (peak)"
 msgstr ""
 
-#: lxc/info.go:206
+#: lxc/info.go:209
 msgid "Memory usage:"
 msgstr ""
 
-#: lxc/utils.go:258
+#: lxc/utils.go:259
 msgid "Missing summary."
 msgstr "Fehlende Zusammenfassung."
 
-#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:412 lxc/storage.go:532
+#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:414 lxc/storage.go:544
 msgid "More than one device matches, specify the device name."
 msgstr ""
 
@@ -850,40 +862,49 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr "der Name des Ursprung Containers muss angegeben werden"
 
-#: lxc/list.go:467 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409
-#: lxc/storage.go:691 lxc/storage.go:880
+#: lxc/cluster.go:182 lxc/list.go:471 lxc/network.go:544 lxc/profile.go:573
+#: lxc/remote.go:409 lxc/storage.go:710 lxc/storage.go:915
 msgid "NAME"
 msgstr ""
 
-#: lxc/network.go:508 lxc/operation.go:139 lxc/remote.go:380 lxc/remote.go:385
+#: lxc/network.go:526 lxc/operation.go:139 lxc/remote.go:380 lxc/remote.go:385
 msgid "NO"
 msgstr ""
 
+#: lxc/list.go:492 lxc/storage.go:920
+msgid "NODE"
+msgstr ""
+
 #: lxc/info.go:117
 #, c-format
 msgid "Name: %s"
 msgstr ""
 
-#: lxc/network.go:258
+#: lxc/network.go:271
 #, fuzzy, c-format
 msgid "Network %s created"
 msgstr "Profil %s erstellt\n"
 
-#: lxc/network.go:374
+#: lxc/network.go:388
 #, fuzzy, c-format
 msgid "Network %s deleted"
 msgstr "Profil %s gelöscht\n"
 
-#: lxc/network.go:451
+#: lxc/network.go:269
+#, fuzzy, c-format
+msgid "Network %s pending on node %s"
+msgstr "Profil %s erstellt\n"
+
+#: lxc/network.go:465
 #, fuzzy, c-format
 msgid "Network %s renamed to %s"
 msgstr "Profil %s erstellt\n"
 
-#: lxc/init.go:143 lxc/init.go:144
+#: lxc/init.go:144 lxc/init.go:145
 msgid "Network name"
 msgstr ""
 
-#: lxc/info.go:223
+#: lxc/info.go:226
 #, fuzzy
 msgid "Network usage:"
 msgstr "Profil %s erstellt\n"
@@ -897,12 +918,12 @@ msgstr ""
 msgid "No certificate provided to add"
 msgstr "Kein Zertifikat zum hinzufügen bereitgestellt"
 
-#: lxc/network.go:293 lxc/network.go:346
+#: lxc/network.go:307 lxc/network.go:360
 #, fuzzy
 msgid "No device found for this network"
 msgstr "Kein Zertifikat für diese Verbindung"
 
-#: lxc/storage.go:421 lxc/storage.go:541
+#: lxc/storage.go:423 lxc/storage.go:553
 #, fuzzy
 msgid "No device found for this storage volume."
 msgstr "Kein Zertifikat für diese Verbindung"
@@ -911,7 +932,26 @@ msgstr "Kein Zertifikat für diese Verbindung"
 msgid "No fingerprint specified."
 msgstr "Kein Fingerabdruck angegeben."
 
-#: lxc/storage.go:365 lxc/storage.go:458
+#: lxc/cluster.go:142
+#, fuzzy, c-format
+msgid "Node %s removed"
+msgstr "Gerät %s wurde von %s entfernt\n"
+
+#: lxc/cluster.go:118
+#, fuzzy, c-format
+msgid "Node %s renamed to %s"
+msgstr "Profil %s wurde auf %s angewandt\n"
+
+#: lxc/init.go:149 lxc/network.go:102 lxc/storage.go:158
+msgid "Node name"
+msgstr ""
+
+#: lxc/info.go:119
+#, fuzzy, c-format
+msgid "Node: %s"
+msgstr "automatisches Update: %s"
+
+#: lxc/storage.go:367 lxc/storage.go:460
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
 
@@ -923,7 +963,7 @@ msgstr ""
 msgid "Only https:// is supported for remote image import."
 msgstr ""
 
-#: lxc/network.go:402 lxc/network.go:546
+#: lxc/network.go:416 lxc/network.go:573
 msgid "Only managed networks can be modified."
 msgstr ""
 
@@ -940,19 +980,19 @@ msgstr ""
 msgid "Override the terminal mode (auto, interactive or non-interactive)"
 msgstr ""
 
-#: lxc/list.go:624
+#: lxc/list.go:640
 msgid "PERSISTENT"
 msgstr ""
 
-#: lxc/list.go:469
+#: lxc/list.go:473
 msgid "PID"
 msgstr ""
 
-#: lxc/list.go:468
+#: lxc/list.go:472
 msgid "PROCESSES"
 msgstr ""
 
-#: lxc/list.go:470
+#: lxc/list.go:474
 msgid "PROFILES"
 msgstr ""
 
@@ -964,11 +1004,11 @@ msgstr ""
 msgid "PUBLIC"
 msgstr ""
 
-#: lxc/info.go:217
+#: lxc/info.go:220
 msgid "Packets received"
 msgstr ""
 
-#: lxc/info.go:218
+#: lxc/info.go:221
 msgid "Packets sent"
 msgstr ""
 
@@ -982,7 +1022,7 @@ msgstr "Alternatives config Verzeichnis."
 msgid "Path to an alternate server directory"
 msgstr "Alternatives config Verzeichnis."
 
-#: lxc/main.go:238
+#: lxc/main.go:239
 #, fuzzy
 msgid "Pause containers."
 msgstr "kann nicht zum selben Container Namen kopieren"
@@ -991,12 +1031,12 @@ msgstr "kann nicht zum selben Container Namen kopieren"
 msgid "Permission denied, are you in the lxd group?"
 msgstr ""
 
-#: lxc/info.go:135
+#: lxc/info.go:138
 #, c-format
 msgid "Pid: %d"
 msgstr ""
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:615 lxc/storage.go:1068
+#: lxc/network.go:441 lxc/profile.go:276 lxc/storage.go:627 lxc/storage.go:1139
 msgid "Press enter to open the editor again"
 msgstr ""
 
@@ -1021,7 +1061,7 @@ msgstr ""
 msgid "Print verbose information"
 msgstr ""
 
-#: lxc/info.go:159
+#: lxc/info.go:162
 #, fuzzy, c-format
 msgid "Processes: %d"
 msgstr "Profil %s erstellt\n"
@@ -1051,7 +1091,7 @@ msgstr "Gerät %s wurde von %s entfernt\n"
 msgid "Profile %s renamed to %s"
 msgstr "Profil %s wurde auf %s angewandt\n"
 
-#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:139 lxc/init.go:140
+#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:140 lxc/init.go:141
 #, fuzzy
 msgid "Profile to apply to the new container"
 msgstr "kann nicht zum selben Container Namen kopieren"
@@ -1061,7 +1101,7 @@ msgstr "kann nicht zum selben Container Namen kopieren"
 msgid "Profiles %s applied to %s"
 msgstr "Profil %s wurde auf %s angewandt\n"
 
-#: lxc/info.go:133
+#: lxc/info.go:136
 #, fuzzy, c-format
 msgid "Profiles: %s"
 msgstr "Profil %s erstellt\n"
@@ -1093,12 +1133,12 @@ msgstr ""
 msgid "Remote admin password"
 msgstr "Entferntes Administrator Passwort"
 
-#: lxc/utils.go:366
+#: lxc/utils.go:367
 #, fuzzy
 msgid "Remote operation canceled by user"
 msgstr "Server Zertifikat vom Benutzer nicht akzeptiert"
 
-#: lxc/info.go:119
+#: lxc/info.go:122
 #, c-format
 msgid "Remote: %s"
 msgstr ""
@@ -1108,7 +1148,7 @@ msgstr ""
 msgid "Remove %s (yes/no): "
 msgstr ""
 
-#: lxc/storage.go:1099
+#: lxc/storage.go:1176
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr ""
@@ -1117,11 +1157,11 @@ msgstr ""
 msgid "Require user confirmation"
 msgstr ""
 
-#: lxc/info.go:156
+#: lxc/info.go:159
 msgid "Resources:"
 msgstr ""
 
-#: lxc/main.go:246
+#: lxc/main.go:247
 #, fuzzy
 msgid "Restart containers."
 msgstr "kann nicht zum selben Container Namen kopieren"
@@ -1131,7 +1171,7 @@ msgstr "kann nicht zum selben Container Namen kopieren"
 msgid "Retrieve the container's console log"
 msgstr "Herunterfahren des Containers erzwingen."
 
-#: lxc/init.go:289
+#: lxc/init.go:292
 #, c-format
 msgid "Retrieving image: %s"
 msgstr ""
@@ -1144,15 +1184,15 @@ msgstr ""
 msgid "SIZE"
 msgstr ""
 
-#: lxc/list.go:471
+#: lxc/list.go:475
 msgid "SNAPSHOTS"
 msgstr ""
 
-#: lxc/storage.go:694
+#: lxc/storage.go:717
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/list.go:472
+#: lxc/cluster.go:185 lxc/list.go:476 lxc/network.go:551 lxc/storage.go:715
 msgid "STATE"
 msgstr ""
 
@@ -1164,7 +1204,7 @@ msgstr ""
 msgid "STATUS"
 msgstr ""
 
-#: lxc/list.go:474
+#: lxc/list.go:478
 msgid "STORAGE POOL"
 msgstr ""
 
@@ -1218,11 +1258,11 @@ msgstr ""
 msgid "Show the resources available to the server"
 msgstr ""
 
-#: lxc/storage.go:155
+#: lxc/storage.go:156
 msgid "Show the resources available to the storage pool"
 msgstr ""
 
-#: lxc/storage.go:156
+#: lxc/storage.go:157
 msgid "Show the used and free space in bytes"
 msgstr ""
 
@@ -1231,7 +1271,7 @@ msgstr ""
 msgid "Size: %.2fMB"
 msgstr "Größe: %.2vMB\n"
 
-#: lxc/info.go:237
+#: lxc/info.go:240
 msgid "Snapshots:"
 msgstr ""
 
@@ -1244,7 +1284,7 @@ msgstr "Anhalten des Containers fehlgeschlagen!"
 msgid "Source:"
 msgstr ""
 
-#: lxc/main.go:256
+#: lxc/main.go:257
 #, fuzzy
 msgid "Start containers."
 msgstr "kann nicht zum selben Container Namen kopieren"
@@ -1254,12 +1294,12 @@ msgstr "kann nicht zum selben Container Namen kopieren"
 msgid "Starting %s"
 msgstr ""
 
-#: lxc/info.go:127
+#: lxc/info.go:130
 #, c-format
 msgid "Status: %s"
 msgstr ""
 
-#: lxc/main.go:262
+#: lxc/main.go:263
 #, fuzzy
 msgid "Stop containers."
 msgstr "Anhalten des Containers fehlgeschlagen!"
@@ -1277,27 +1317,32 @@ msgstr "Anhalten des Containers fehlgeschlagen!"
 msgid "Stopping the container failed: %s"
 msgstr "Anhalten des Containers fehlgeschlagen!"
 
-#: lxc/storage.go:506
+#: lxc/storage.go:517
 #, fuzzy, c-format
 msgid "Storage pool %s created"
 msgstr "Profil %s erstellt\n"
 
-#: lxc/storage.go:565
+#: lxc/storage.go:577
 #, fuzzy, c-format
 msgid "Storage pool %s deleted"
 msgstr "Profil %s gelöscht\n"
 
-#: lxc/init.go:145 lxc/init.go:146
+#: lxc/storage.go:515
+#, fuzzy, c-format
+msgid "Storage pool %s pending on node %s"
+msgstr "Profil %s erstellt\n"
+
+#: lxc/init.go:146 lxc/init.go:147
 #, fuzzy
 msgid "Storage pool name"
 msgstr "Profilname kann nicht geändert werden"
 
-#: lxc/storage.go:914
+#: lxc/storage.go:959
 #, fuzzy, c-format
 msgid "Storage volume %s created"
 msgstr "Profil %s erstellt\n"
 
-#: lxc/storage.go:929
+#: lxc/storage.go:979
 #, fuzzy, c-format
 msgid "Storage volume %s deleted"
 msgstr "Profil %s gelöscht\n"
@@ -1307,15 +1352,15 @@ msgstr "Profil %s gelöscht\n"
 msgid "Store the container state (only for stop)"
 msgstr "Herunterfahren des Containers erzwingen."
 
-#: lxc/info.go:198
+#: lxc/info.go:201
 msgid "Swap (current)"
 msgstr ""
 
-#: lxc/info.go:202
+#: lxc/info.go:205
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:473 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:879
+#: lxc/list.go:477 lxc/network.go:545 lxc/operation.go:153 lxc/storage.go:914
 msgid "TYPE"
 msgstr ""
 
@@ -1329,7 +1374,7 @@ msgid ""
 "restarted."
 msgstr ""
 
-#: lxc/init.go:362
+#: lxc/init.go:365
 msgid "The container you are starting doesn't have any network attached to it."
 msgstr ""
 
@@ -1344,7 +1389,7 @@ msgstr "entfernte Instanz %s existiert bereits"
 msgid "The device doesn't exist"
 msgstr "entfernte Instanz %s existiert nicht"
 
-#: lxc/init.go:346
+#: lxc/init.go:349
 #, c-format
 msgid "The local image '%s' couldn't be found, trying '%s:' instead."
 msgstr ""
@@ -1353,12 +1398,12 @@ msgstr ""
 msgid "The opposite of \"lxc pause\" is \"lxc start\"."
 msgstr ""
 
-#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:426 lxc/storage.go:546
+#: lxc/network.go:312 lxc/network.go:365 lxc/storage.go:428 lxc/storage.go:558
 #, fuzzy
 msgid "The specified device doesn't exist"
 msgstr "entfernte Instanz %s existiert nicht"
 
-#: lxc/network.go:302 lxc/network.go:355
+#: lxc/network.go:316 lxc/network.go:369
 #, fuzzy
 msgid "The specified device doesn't match the network"
 msgstr "entfernte Instanz %s existiert nicht"
@@ -1385,11 +1430,11 @@ msgstr "Wartezeit bevor der Container gestoppt wird."
 msgid "Timestamps:"
 msgstr "Zeitstempel:\n"
 
-#: lxc/init.go:364
+#: lxc/init.go:367
 msgid "To attach a network to a container, use: lxc network attach"
 msgstr ""
 
-#: lxc/init.go:363
+#: lxc/init.go:366
 msgid "To create a new network, use: lxc network create"
 msgstr ""
 
@@ -1420,11 +1465,11 @@ msgstr ""
 msgid "Try `lxc info --show-log %s` for more info"
 msgstr ""
 
-#: lxc/info.go:129
+#: lxc/info.go:132
 msgid "Type: ephemeral"
 msgstr ""
 
-#: lxc/info.go:131
+#: lxc/info.go:134
 msgid "Type: persistent"
 msgstr ""
 
@@ -1432,11 +1477,11 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/remote.go:410
+#: lxc/cluster.go:183 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:695 lxc/storage.go:882
+#: lxc/network.go:548 lxc/profile.go:574 lxc/storage.go:719 lxc/storage.go:917
 msgid "USED BY"
 msgstr ""
 
@@ -1472,6 +1517,25 @@ msgstr ""
 "Benutzung: lxc [Unterbefehl] [Optionen]\n"
 "Verfügbare Befehle:\n"
 
+#: lxc/cluster.go:22
+msgid ""
+"Usage: lxc cluster <subcommand> [options]\n"
+"\n"
+"Manage cluster nodes.\n"
+"\n"
+"lxc cluster list [<remote>:]\n"
+"    List all nodes in the cluster.\n"
+"\n"
+"lxc cluster show [<remote>:]<node>\n"
+"    Show details of a node.\n"
+"\n"
+"lxc cluster rename [<remote>:]<node> <new-name>\n"
+"    Rename a cluster node.\n"
+"\n"
+"lxc cluster delete [<remote>:]<node> [--force]\n"
+"    Delete a node from the cluster."
+msgstr ""
+
 #: lxc/config.go:85
 #, fuzzy
 msgid ""
@@ -1843,12 +1907,13 @@ msgid ""
 "    For LXD server information."
 msgstr ""
 
-#: lxc/init.go:77
+#: lxc/init.go:78
 #, fuzzy
 msgid ""
 "Usage: lxc init [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create containers from images.\n"
 "\n"
@@ -1875,7 +1940,8 @@ msgstr ""
 msgid ""
 "Usage: lxc launch [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create and start containers from images.\n"
 "\n"
@@ -1968,6 +2034,8 @@ msgid ""
 "\n"
 "\tt - Type (persistent or ephemeral)\n"
 "\n"
+"\tH - Node hosting the container\n"
+"\n"
 "Custom columns are defined with \"key[:name][:maxWidth]\":\n"
 "\n"
 "\tKEY: The (extended) config key to display\n"
@@ -2051,7 +2119,7 @@ msgstr ""
 "\n"
 "lxc move <Quelle> <Ziel>\n"
 
-#: lxc/network.go:50
+#: lxc/network.go:52
 msgid ""
 "Usage: lxc network <subcommand> [options]\n"
 "\n"
@@ -2060,13 +2128,13 @@ msgid ""
 "lxc network list [<remote>:]\n"
 "    List available networks.\n"
 "\n"
-"lxc network show [<remote>:]<network>\n"
+"lxc network show [<remote>:]<network> [--target <node>]\n"
 "    Show details of a network.\n"
 "\n"
-"lxc network create [<remote>:]<network> [key=value...]\n"
+"lxc network create [<remote>:]<network> [key=value...] [--target <node>]\n"
 "    Create a network.\n"
 "\n"
-"lxc network get [<remote>:]<network> <key>\n"
+"lxc network get [<remote>:]<network> <key> [--target <node>]\n"
 "    Get network configuration.\n"
 "\n"
 "lxc network set [<remote>:]<network> <key> <value>\n"
@@ -2355,7 +2423,7 @@ msgid ""
 "    Create a snapshot of \"u1\" called \"snap0\"."
 msgstr ""
 
-#: lxc/storage.go:65
+#: lxc/storage.go:66
 msgid ""
 "Usage: lxc storage <subcommand> [options]\n"
 "\n"
@@ -2365,13 +2433,14 @@ msgid ""
 "lxc storage list [<remote>:]\n"
 "    List available storage pools.\n"
 "\n"
-"lxc storage show [<remote>:]<pool> [--resources]\n"
+"lxc storage show [<remote>:]<pool> [--resources] [--target <node>]\n"
 "    Show details of a storage pool.\n"
 "\n"
 "lxc storage info [<remote>:]<pool> [--bytes]\n"
 "    Show information of a storage pool in yaml format.\n"
 "\n"
-"lxc storage create [<remote>:]<pool> <driver> [key=value]...\n"
+"lxc storage create [<remote>:]<pool> <driver> [key=value]... [--target "
+"<node>]\n"
 "    Create a storage pool.\n"
 "\n"
 "lxc storage get [<remote>:]<pool> <key>\n"
@@ -2394,28 +2463,31 @@ msgid ""
 "lxc storage volume list [<remote>:]<pool>\n"
 "    List available storage volumes on a storage pool.\n"
 "\n"
-"lxc storage volume show [<remote>:]<pool> <volume>\n"
-"    Show details of a storage volume on a storage pool.\n"
+"lxc storage volume show [<remote>:]<pool> <volume> [--target <node>]\n"
+"   Show details of a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume create [<remote>:]<pool> <volume> [key=value]...\n"
+"lxc storage volume create [<remote>:]<pool> <volume> [key=value]... [--"
+"target <node>]\n"
 "    Create a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume rename [<remote>:]<pool> <old name> <new name>\n"
+"lxc storage volume rename [<remote>:]<pool> <old name> <new name> [--target "
+"<node>]\n"
 "    Rename a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume get [<remote>:]<pool> <volume> <key>\n"
+"lxc storage volume get [<remote>:]<pool> <volume> <key> [--target <node>]\n"
 "    Get storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume set [<remote>:]<pool> <volume> <key> <value>\n"
+"lxc storage volume set [<remote>:]<pool> <volume> <key> <value> [--target "
+"<node>]\n"
 "    Set storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume unset [<remote>:]<pool> <volume> <key>\n"
+"lxc storage volume unset [<remote>:]<pool> <volume> <key> [--target <node>]\n"
 "    Unset storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume delete [<remote>:]<pool> <volume>\n"
+"lxc storage volume delete [<remote>:]<pool> <volume> [--target <node>]\n"
 "    Delete a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume edit [<remote>:]<pool> <volume>\n"
+"lxc storage volume edit [<remote>:]<pool> <volume> [--target <node>]\n"
 "    Edit storage volume, either by launching external editor or reading "
 "STDIN.\n"
 "\n"
@@ -2469,7 +2541,7 @@ msgstr ""
 msgid "User aborted delete operation."
 msgstr ""
 
-#: lxc/utils.go:371
+#: lxc/utils.go:372
 msgid ""
 "User signaled us three times, exiting. The remote operation will keep "
 "running."
@@ -2491,7 +2563,7 @@ msgstr ""
 msgid "Whether or not to snapshot the container's running state"
 msgstr "Zustand des laufenden Containers sichern oder nicht"
 
-#: lxc/network.go:510 lxc/operation.go:141 lxc/remote.go:382 lxc/remote.go:387
+#: lxc/network.go:528 lxc/operation.go:141 lxc/remote.go:382 lxc/remote.go:387
 msgid "YES"
 msgstr ""
 
@@ -2525,11 +2597,11 @@ msgstr ""
 msgid "default"
 msgstr ""
 
-#: lxc/storage.go:804
+#: lxc/storage.go:835
 msgid "description"
 msgstr ""
 
-#: lxc/init.go:311
+#: lxc/init.go:314
 msgid "didn't get any affected image, container or snapshot from server"
 msgstr ""
 
@@ -2537,7 +2609,7 @@ msgstr ""
 msgid "disabled"
 msgstr ""
 
-#: lxc/storage.go:803
+#: lxc/storage.go:834
 msgid "driver"
 msgstr ""
 
@@ -2555,11 +2627,11 @@ msgstr "Fehler: %v\n"
 msgid "error: unknown command: %s"
 msgstr "Fehler: unbekannter Befehl: %s\n"
 
-#: lxc/storage.go:801
+#: lxc/storage.go:832
 msgid "info"
 msgstr ""
 
-#: lxc/storage.go:802
+#: lxc/storage.go:833
 msgid "name"
 msgstr ""
 
@@ -2572,7 +2644,7 @@ msgstr ""
 msgid "ok (y/n)?"
 msgstr "OK (y/n)? "
 
-#: lxc/main.go:366 lxc/main.go:370
+#: lxc/main.go:367 lxc/main.go:371
 #, c-format
 msgid "processing aliases failed %s\n"
 msgstr ""
@@ -2601,32 +2673,32 @@ msgstr "entfernte Instanz %s existiert als <%s>"
 msgid "remote %s is static and cannot be modified"
 msgstr ""
 
-#: lxc/storage.go:806
+#: lxc/storage.go:837
 msgid "space used"
 msgstr ""
 
-#: lxc/info.go:248
+#: lxc/info.go:251
 msgid "stateful"
 msgstr ""
 
-#: lxc/info.go:250
+#: lxc/info.go:253
 msgid "stateless"
 msgstr ""
 
-#: lxc/info.go:244
+#: lxc/info.go:247
 #, c-format
 msgid "taken at %s"
 msgstr ""
 
-#: lxc/storage.go:805
+#: lxc/storage.go:836
 msgid "total space"
 msgstr ""
 
-#: lxc/storage.go:800
+#: lxc/storage.go:831
 msgid "used by"
 msgstr ""
 
-#: lxc/main.go:297
+#: lxc/main.go:298
 msgid "wrong number of subcommand arguments"
 msgstr "falsche Anzahl an Parametern für Unterbefehl"
 
diff --git a/po/el.po b/po/el.po
index 0e68d83c8..0254687aa 100644
--- a/po/el.po
+++ b/po/el.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2018-01-21 21:48+0000\n"
+"POT-Creation-Date: 2018-01-24 13:34+0000\n"
 "PO-Revision-Date: 2017-02-14 08:00+0000\n"
 "Last-Translator: Simos Xenitellis <simos.65 at gmail.com>\n"
 "Language-Team: Greek <https://hosted.weblate.org/projects/linux-containers/"
@@ -19,7 +19,7 @@ msgstr ""
 "Plural-Forms: nplurals=2; plural=n != 1;\n"
 "X-Generator: Weblate 2.12-dev\n"
 
-#: lxc/storage.go:34
+#: lxc/storage.go:35
 msgid ""
 "### This is a yaml representation of a storage pool.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -36,7 +36,7 @@ msgid ""
 "###   zfs.pool_name: default"
 msgstr ""
 
-#: lxc/storage.go:51
+#: lxc/storage.go:52
 msgid ""
 "### This is a yaml representation of a storage volume.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -105,7 +105,7 @@ msgid ""
 "###  description: My custom image"
 msgstr ""
 
-#: lxc/network.go:30
+#: lxc/network.go:32
 msgid ""
 "### This is a yaml representation of the network.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -156,7 +156,7 @@ msgstr ""
 msgid "%s is not a directory"
 msgstr ""
 
-#: lxc/utils.go:375
+#: lxc/utils.go:376
 #, c-format
 msgid "%v (interrupt two more times to force)"
 msgstr ""
@@ -182,7 +182,7 @@ msgstr ""
 msgid "ARCH"
 msgstr ""
 
-#: lxc/list.go:463
+#: lxc/list.go:467
 msgid "ARCHITECTURE"
 msgstr ""
 
@@ -207,7 +207,7 @@ msgstr ""
 msgid "Aliases:"
 msgstr ""
 
-#: lxc/image.go:600 lxc/info.go:122
+#: lxc/image.go:600 lxc/info.go:125
 #, c-format
 msgid "Architecture: %s"
 msgstr ""
@@ -231,11 +231,11 @@ msgstr ""
 msgid "Both --all and container name given"
 msgstr ""
 
-#: lxc/info.go:215
+#: lxc/info.go:218
 msgid "Bytes received"
 msgstr ""
 
-#: lxc/info.go:216
+#: lxc/info.go:219
 msgid "Bytes sent"
 msgstr ""
 
@@ -247,11 +247,11 @@ msgstr ""
 msgid "COMMON NAME"
 msgstr ""
 
-#: lxc/info.go:179
+#: lxc/info.go:182
 msgid "CPU usage (in seconds)"
 msgstr ""
 
-#: lxc/info.go:183
+#: lxc/info.go:186
 #, fuzzy
 msgid "CPU usage:"
 msgstr "  Χρήση CPU:"
@@ -260,7 +260,7 @@ msgstr "  Χρήση CPU:"
 msgid "CREATED"
 msgstr ""
 
-#: lxc/list.go:464
+#: lxc/list.go:468
 msgid "CREATED AT"
 msgstr ""
 
@@ -273,7 +273,7 @@ msgstr ""
 msgid "Can't pull a directory without --recursive"
 msgstr ""
 
-#: lxc/config.go:207 lxc/network.go:560
+#: lxc/config.go:207 lxc/network.go:587
 #, c-format
 msgid "Can't read from stdin: %s"
 msgstr ""
@@ -292,7 +292,7 @@ msgstr ""
 msgid "Can't unset key '%s', it's not currently set."
 msgstr ""
 
-#: lxc/profile.go:546 lxc/storage.go:663
+#: lxc/profile.go:546 lxc/storage.go:675
 msgid "Cannot provide container name to list"
 msgstr ""
 
@@ -305,7 +305,7 @@ msgstr ""
 msgid "Client certificate stored at server: "
 msgstr ""
 
-#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:130 lxc/list.go:131
+#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:134 lxc/list.go:135
 msgid "Columns"
 msgstr ""
 
@@ -313,12 +313,12 @@ msgstr ""
 msgid "Commands:"
 msgstr ""
 
-#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:137 lxc/init.go:138
+#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:138 lxc/init.go:139
 msgid "Config key/value to apply to the new container"
 msgstr ""
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:614 lxc/storage.go:1067
+#: lxc/network.go:440 lxc/profile.go:275 lxc/storage.go:626 lxc/storage.go:1138
 #, c-format
 msgid "Config parsing error: %s"
 msgstr ""
@@ -335,7 +335,7 @@ msgstr ""
 msgid "Container name is mandatory"
 msgstr ""
 
-#: lxc/copy.go:229 lxc/init.go:317
+#: lxc/copy.go:229 lxc/init.go:320
 #, c-format
 msgid "Container name is: %s"
 msgstr ""
@@ -370,26 +370,30 @@ msgstr ""
 msgid "Create any directories necessary"
 msgstr ""
 
-#: lxc/image.go:605 lxc/info.go:124
+#: lxc/image.go:605 lxc/info.go:127
 #, c-format
 msgid "Created: %s"
 msgstr ""
 
-#: lxc/init.go:196
+#: lxc/init.go:199
 #, c-format
 msgid "Creating %s"
 msgstr ""
 
-#: lxc/init.go:194
+#: lxc/init.go:197
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:465 lxc/network.go:525
-#: lxc/storage.go:692 lxc/storage.go:881
+#: lxc/cluster.go:184
+msgid "DATABASE"
+msgstr ""
+
+#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:469 lxc/network.go:547
+#: lxc/storage.go:711 lxc/storage.go:916
 msgid "DESCRIPTION"
 msgstr ""
 
-#: lxc/storage.go:693
+#: lxc/storage.go:712
 msgid "DRIVER"
 msgstr ""
 
@@ -407,7 +411,7 @@ msgstr ""
 msgid "Device %s removed from %s"
 msgstr ""
 
-#: lxc/utils.go:307 lxc/utils.go:331
+#: lxc/utils.go:308 lxc/utils.go:332
 #, c-format
 msgid "Device already exists: %s"
 msgstr ""
@@ -424,12 +428,12 @@ msgstr ""
 msgid "Disable stdin (reads from /dev/null)"
 msgstr ""
 
-#: lxc/info.go:172
+#: lxc/info.go:175
 #, fuzzy
 msgid "Disk usage:"
 msgstr "  Χρήση CPU:"
 
-#: lxc/list.go:622
+#: lxc/list.go:638
 msgid "EPHEMERAL"
 msgstr ""
 
@@ -453,7 +457,7 @@ msgstr ""
 msgid "Environment:"
 msgstr ""
 
-#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:141 lxc/init.go:142
+#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:142 lxc/init.go:143
 msgid "Ephemeral container"
 msgstr ""
 
@@ -488,7 +492,7 @@ msgstr ""
 msgid "FINGERPRINT"
 msgstr ""
 
-#: lxc/utils.go:413
+#: lxc/utils.go:414
 #, c-format
 msgid "Failed to create alias %s"
 msgstr ""
@@ -507,7 +511,7 @@ msgstr ""
 msgid "Failed to get the new container name"
 msgstr ""
 
-#: lxc/utils.go:403
+#: lxc/utils.go:404
 #, c-format
 msgid "Failed to remove alias %s"
 msgstr ""
@@ -517,11 +521,11 @@ msgstr ""
 msgid "Failed to walk path for %s: %s"
 msgstr ""
 
-#: lxc/list.go:133
+#: lxc/list.go:137
 msgid "Fast mode (same as --columns=nsacPt)"
 msgstr ""
 
-#: lxc/network.go:486 lxc/operation.go:121
+#: lxc/network.go:504 lxc/operation.go:121
 msgid "Filtering isn't supported yet"
 msgstr ""
 
@@ -534,6 +538,10 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
+#: lxc/cluster.go:41
+msgid "Force removing a node, even if degraded"
+msgstr ""
+
 #: lxc/action.go:47 lxc/action.go:48
 msgid "Force the container to shutdown"
 msgstr ""
@@ -546,7 +554,7 @@ msgstr ""
 msgid "Force using the local unix socket"
 msgstr ""
 
-#: lxc/image.go:180 lxc/list.go:132
+#: lxc/image.go:180 lxc/list.go:136
 msgid "Format (csv|json|table|yaml)"
 msgstr ""
 
@@ -558,11 +566,11 @@ msgstr ""
 msgid "ID"
 msgstr ""
 
-#: lxc/list.go:461
+#: lxc/list.go:465
 msgid "IPV4"
 msgstr ""
 
-#: lxc/list.go:462
+#: lxc/list.go:466
 msgid "IPV6"
 msgstr ""
 
@@ -609,7 +617,7 @@ msgstr ""
 msgid "Input data"
 msgstr ""
 
-#: lxc/init.go:147
+#: lxc/init.go:148
 msgid "Instance type"
 msgstr ""
 
@@ -646,7 +654,7 @@ msgstr ""
 msgid "Invalid target %s"
 msgstr ""
 
-#: lxc/info.go:153
+#: lxc/info.go:156
 msgid "Ips:"
 msgstr ""
 
@@ -654,7 +662,7 @@ msgstr ""
 msgid "Keep the image up to date after initial copy"
 msgstr ""
 
-#: lxc/list.go:466
+#: lxc/list.go:470
 msgid "LAST USED AT"
 msgstr ""
 
@@ -671,14 +679,18 @@ msgstr ""
 msgid "Last used: never"
 msgstr ""
 
-#: lxc/info.go:268
+#: lxc/info.go:271
 msgid "Log:"
 msgstr ""
 
-#: lxc/network.go:524
+#: lxc/network.go:546
 msgid "MANAGED"
 msgstr ""
 
+#: lxc/cluster.go:186
+msgid "MESSAGE"
+msgstr ""
+
 #: lxc/image.go:176
 msgid "Make image public"
 msgstr ""
@@ -687,24 +699,24 @@ msgstr ""
 msgid "Make the image public"
 msgstr ""
 
-#: lxc/info.go:190
+#: lxc/info.go:193
 msgid "Memory (current)"
 msgstr ""
 
-#: lxc/info.go:194
+#: lxc/info.go:197
 msgid "Memory (peak)"
 msgstr ""
 
-#: lxc/info.go:206
+#: lxc/info.go:209
 #, fuzzy
 msgid "Memory usage:"
 msgstr "  Χρήση μνήμης:"
 
-#: lxc/utils.go:258
+#: lxc/utils.go:259
 msgid "Missing summary."
 msgstr ""
 
-#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:412 lxc/storage.go:532
+#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:414 lxc/storage.go:544
 msgid "More than one device matches, specify the device name."
 msgstr ""
 
@@ -724,40 +736,49 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/list.go:467 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409
-#: lxc/storage.go:691 lxc/storage.go:880
+#: lxc/cluster.go:182 lxc/list.go:471 lxc/network.go:544 lxc/profile.go:573
+#: lxc/remote.go:409 lxc/storage.go:710 lxc/storage.go:915
 msgid "NAME"
 msgstr ""
 
-#: lxc/network.go:508 lxc/operation.go:139 lxc/remote.go:380 lxc/remote.go:385
+#: lxc/network.go:526 lxc/operation.go:139 lxc/remote.go:380 lxc/remote.go:385
 msgid "NO"
 msgstr ""
 
+#: lxc/list.go:492 lxc/storage.go:920
+msgid "NODE"
+msgstr ""
+
 #: lxc/info.go:117
 #, c-format
 msgid "Name: %s"
 msgstr ""
 
-#: lxc/network.go:258
+#: lxc/network.go:271
 #, c-format
 msgid "Network %s created"
 msgstr ""
 
-#: lxc/network.go:374
+#: lxc/network.go:388
 #, c-format
 msgid "Network %s deleted"
 msgstr ""
 
-#: lxc/network.go:451
+#: lxc/network.go:269
+#, c-format
+msgid "Network %s pending on node %s"
+msgstr ""
+
+#: lxc/network.go:465
 #, c-format
 msgid "Network %s renamed to %s"
 msgstr ""
 
-#: lxc/init.go:143 lxc/init.go:144
+#: lxc/init.go:144 lxc/init.go:145
 msgid "Network name"
 msgstr ""
 
-#: lxc/info.go:223
+#: lxc/info.go:226
 #, fuzzy
 msgid "Network usage:"
 msgstr "  Χρήση δικτύου:"
@@ -770,11 +791,11 @@ msgstr ""
 msgid "No certificate provided to add"
 msgstr ""
 
-#: lxc/network.go:293 lxc/network.go:346
+#: lxc/network.go:307 lxc/network.go:360
 msgid "No device found for this network"
 msgstr ""
 
-#: lxc/storage.go:421 lxc/storage.go:541
+#: lxc/storage.go:423 lxc/storage.go:553
 msgid "No device found for this storage volume."
 msgstr ""
 
@@ -782,7 +803,26 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/storage.go:365 lxc/storage.go:458
+#: lxc/cluster.go:142
+#, c-format
+msgid "Node %s removed"
+msgstr ""
+
+#: lxc/cluster.go:118
+#, c-format
+msgid "Node %s renamed to %s"
+msgstr ""
+
+#: lxc/init.go:149 lxc/network.go:102 lxc/storage.go:158
+msgid "Node name"
+msgstr ""
+
+#: lxc/info.go:119
+#, c-format
+msgid "Node: %s"
+msgstr ""
+
+#: lxc/storage.go:367 lxc/storage.go:460
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
 
@@ -794,7 +834,7 @@ msgstr ""
 msgid "Only https:// is supported for remote image import."
 msgstr ""
 
-#: lxc/network.go:402 lxc/network.go:546
+#: lxc/network.go:416 lxc/network.go:573
 msgid "Only managed networks can be modified."
 msgstr ""
 
@@ -811,19 +851,19 @@ msgstr ""
 msgid "Override the terminal mode (auto, interactive or non-interactive)"
 msgstr ""
 
-#: lxc/list.go:624
+#: lxc/list.go:640
 msgid "PERSISTENT"
 msgstr ""
 
-#: lxc/list.go:469
+#: lxc/list.go:473
 msgid "PID"
 msgstr ""
 
-#: lxc/list.go:468
+#: lxc/list.go:472
 msgid "PROCESSES"
 msgstr ""
 
-#: lxc/list.go:470
+#: lxc/list.go:474
 msgid "PROFILES"
 msgstr ""
 
@@ -835,11 +875,11 @@ msgstr ""
 msgid "PUBLIC"
 msgstr ""
 
-#: lxc/info.go:217
+#: lxc/info.go:220
 msgid "Packets received"
 msgstr ""
 
-#: lxc/info.go:218
+#: lxc/info.go:221
 msgid "Packets sent"
 msgstr ""
 
@@ -851,7 +891,7 @@ msgstr ""
 msgid "Path to an alternate server directory"
 msgstr ""
 
-#: lxc/main.go:238
+#: lxc/main.go:239
 msgid "Pause containers."
 msgstr ""
 
@@ -859,12 +899,12 @@ msgstr ""
 msgid "Permission denied, are you in the lxd group?"
 msgstr ""
 
-#: lxc/info.go:135
+#: lxc/info.go:138
 #, c-format
 msgid "Pid: %d"
 msgstr ""
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:615 lxc/storage.go:1068
+#: lxc/network.go:441 lxc/profile.go:276 lxc/storage.go:627 lxc/storage.go:1139
 msgid "Press enter to open the editor again"
 msgstr ""
 
@@ -889,7 +929,7 @@ msgstr ""
 msgid "Print verbose information"
 msgstr ""
 
-#: lxc/info.go:159
+#: lxc/info.go:162
 #, c-format
 msgid "Processes: %d"
 msgstr ""
@@ -919,7 +959,7 @@ msgstr ""
 msgid "Profile %s renamed to %s"
 msgstr ""
 
-#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:139 lxc/init.go:140
+#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:140 lxc/init.go:141
 msgid "Profile to apply to the new container"
 msgstr ""
 
@@ -928,7 +968,7 @@ msgstr ""
 msgid "Profiles %s applied to %s"
 msgstr ""
 
-#: lxc/info.go:133
+#: lxc/info.go:136
 #, c-format
 msgid "Profiles: %s"
 msgstr ""
@@ -959,11 +999,11 @@ msgstr ""
 msgid "Remote admin password"
 msgstr ""
 
-#: lxc/utils.go:366
+#: lxc/utils.go:367
 msgid "Remote operation canceled by user"
 msgstr ""
 
-#: lxc/info.go:119
+#: lxc/info.go:122
 #, c-format
 msgid "Remote: %s"
 msgstr ""
@@ -973,7 +1013,7 @@ msgstr ""
 msgid "Remove %s (yes/no): "
 msgstr ""
 
-#: lxc/storage.go:1099
+#: lxc/storage.go:1176
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr ""
@@ -982,11 +1022,11 @@ msgstr ""
 msgid "Require user confirmation"
 msgstr ""
 
-#: lxc/info.go:156
+#: lxc/info.go:159
 msgid "Resources:"
 msgstr ""
 
-#: lxc/main.go:246
+#: lxc/main.go:247
 msgid "Restart containers."
 msgstr ""
 
@@ -994,7 +1034,7 @@ msgstr ""
 msgid "Retrieve the container's console log"
 msgstr ""
 
-#: lxc/init.go:289
+#: lxc/init.go:292
 #, c-format
 msgid "Retrieving image: %s"
 msgstr ""
@@ -1007,15 +1047,15 @@ msgstr ""
 msgid "SIZE"
 msgstr ""
 
-#: lxc/list.go:471
+#: lxc/list.go:475
 msgid "SNAPSHOTS"
 msgstr ""
 
-#: lxc/storage.go:694
+#: lxc/storage.go:717
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/list.go:472
+#: lxc/cluster.go:185 lxc/list.go:476 lxc/network.go:551 lxc/storage.go:715
 msgid "STATE"
 msgstr ""
 
@@ -1027,7 +1067,7 @@ msgstr ""
 msgid "STATUS"
 msgstr ""
 
-#: lxc/list.go:474
+#: lxc/list.go:478
 msgid "STORAGE POOL"
 msgstr ""
 
@@ -1079,11 +1119,11 @@ msgstr ""
 msgid "Show the resources available to the server"
 msgstr ""
 
-#: lxc/storage.go:155
+#: lxc/storage.go:156
 msgid "Show the resources available to the storage pool"
 msgstr ""
 
-#: lxc/storage.go:156
+#: lxc/storage.go:157
 msgid "Show the used and free space in bytes"
 msgstr ""
 
@@ -1092,7 +1132,7 @@ msgstr ""
 msgid "Size: %.2fMB"
 msgstr ""
 
-#: lxc/info.go:237
+#: lxc/info.go:240
 msgid "Snapshots:"
 msgstr ""
 
@@ -1105,7 +1145,7 @@ msgstr ""
 msgid "Source:"
 msgstr ""
 
-#: lxc/main.go:256
+#: lxc/main.go:257
 msgid "Start containers."
 msgstr ""
 
@@ -1114,12 +1154,12 @@ msgstr ""
 msgid "Starting %s"
 msgstr ""
 
-#: lxc/info.go:127
+#: lxc/info.go:130
 #, c-format
 msgid "Status: %s"
 msgstr ""
 
-#: lxc/main.go:262
+#: lxc/main.go:263
 msgid "Stop containers."
 msgstr ""
 
@@ -1136,26 +1176,31 @@ msgstr ""
 msgid "Stopping the container failed: %s"
 msgstr ""
 
-#: lxc/storage.go:506
+#: lxc/storage.go:517
 #, c-format
 msgid "Storage pool %s created"
 msgstr ""
 
-#: lxc/storage.go:565
+#: lxc/storage.go:577
 #, c-format
 msgid "Storage pool %s deleted"
 msgstr ""
 
-#: lxc/init.go:145 lxc/init.go:146
+#: lxc/storage.go:515
+#, c-format
+msgid "Storage pool %s pending on node %s"
+msgstr ""
+
+#: lxc/init.go:146 lxc/init.go:147
 msgid "Storage pool name"
 msgstr ""
 
-#: lxc/storage.go:914
+#: lxc/storage.go:959
 #, c-format
 msgid "Storage volume %s created"
 msgstr ""
 
-#: lxc/storage.go:929
+#: lxc/storage.go:979
 #, c-format
 msgid "Storage volume %s deleted"
 msgstr ""
@@ -1164,15 +1209,15 @@ msgstr ""
 msgid "Store the container state (only for stop)"
 msgstr ""
 
-#: lxc/info.go:198
+#: lxc/info.go:201
 msgid "Swap (current)"
 msgstr ""
 
-#: lxc/info.go:202
+#: lxc/info.go:205
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:473 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:879
+#: lxc/list.go:477 lxc/network.go:545 lxc/operation.go:153 lxc/storage.go:914
 msgid "TYPE"
 msgstr ""
 
@@ -1186,7 +1231,7 @@ msgid ""
 "restarted."
 msgstr ""
 
-#: lxc/init.go:362
+#: lxc/init.go:365
 msgid "The container you are starting doesn't have any network attached to it."
 msgstr ""
 
@@ -1199,7 +1244,7 @@ msgstr ""
 msgid "The device doesn't exist"
 msgstr ""
 
-#: lxc/init.go:346
+#: lxc/init.go:349
 #, c-format
 msgid "The local image '%s' couldn't be found, trying '%s:' instead."
 msgstr ""
@@ -1208,11 +1253,11 @@ msgstr ""
 msgid "The opposite of \"lxc pause\" is \"lxc start\"."
 msgstr ""
 
-#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:426 lxc/storage.go:546
+#: lxc/network.go:312 lxc/network.go:365 lxc/storage.go:428 lxc/storage.go:558
 msgid "The specified device doesn't exist"
 msgstr ""
 
-#: lxc/network.go:302 lxc/network.go:355
+#: lxc/network.go:316 lxc/network.go:369
 msgid "The specified device doesn't match the network"
 msgstr ""
 
@@ -1236,11 +1281,11 @@ msgstr ""
 msgid "Timestamps:"
 msgstr ""
 
-#: lxc/init.go:364
+#: lxc/init.go:367
 msgid "To attach a network to a container, use: lxc network attach"
 msgstr ""
 
-#: lxc/init.go:363
+#: lxc/init.go:366
 msgid "To create a new network, use: lxc network create"
 msgstr ""
 
@@ -1271,11 +1316,11 @@ msgstr ""
 msgid "Try `lxc info --show-log %s` for more info"
 msgstr ""
 
-#: lxc/info.go:129
+#: lxc/info.go:132
 msgid "Type: ephemeral"
 msgstr ""
 
-#: lxc/info.go:131
+#: lxc/info.go:134
 msgid "Type: persistent"
 msgstr ""
 
@@ -1283,11 +1328,11 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/remote.go:410
+#: lxc/cluster.go:183 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:695 lxc/storage.go:882
+#: lxc/network.go:548 lxc/profile.go:574 lxc/storage.go:719 lxc/storage.go:917
 msgid "USED BY"
 msgstr ""
 
@@ -1317,6 +1362,25 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
+#: lxc/cluster.go:22
+msgid ""
+"Usage: lxc cluster <subcommand> [options]\n"
+"\n"
+"Manage cluster nodes.\n"
+"\n"
+"lxc cluster list [<remote>:]\n"
+"    List all nodes in the cluster.\n"
+"\n"
+"lxc cluster show [<remote>:]<node>\n"
+"    Show details of a node.\n"
+"\n"
+"lxc cluster rename [<remote>:]<node> <new-name>\n"
+"    Rename a cluster node.\n"
+"\n"
+"lxc cluster delete [<remote>:]<node> [--force]\n"
+"    Delete a node from the cluster."
+msgstr ""
+
 #: lxc/config.go:85
 msgid ""
 "Usage: lxc config <subcommand> [options]\n"
@@ -1633,11 +1697,12 @@ msgid ""
 "    For LXD server information."
 msgstr ""
 
-#: lxc/init.go:77
+#: lxc/init.go:78
 msgid ""
 "Usage: lxc init [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create containers from images.\n"
 "\n"
@@ -1652,7 +1717,8 @@ msgstr ""
 msgid ""
 "Usage: lxc launch [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create and start containers from images.\n"
 "\n"
@@ -1733,6 +1799,8 @@ msgid ""
 "\n"
 "\tt - Type (persistent or ephemeral)\n"
 "\n"
+"\tH - Node hosting the container\n"
+"\n"
 "Custom columns are defined with \"key[:name][:maxWidth]\":\n"
 "\n"
 "\tKEY: The (extended) config key to display\n"
@@ -1798,7 +1866,7 @@ msgid ""
 "    Rename a snapshot."
 msgstr ""
 
-#: lxc/network.go:50
+#: lxc/network.go:52
 msgid ""
 "Usage: lxc network <subcommand> [options]\n"
 "\n"
@@ -1807,13 +1875,13 @@ msgid ""
 "lxc network list [<remote>:]\n"
 "    List available networks.\n"
 "\n"
-"lxc network show [<remote>:]<network>\n"
+"lxc network show [<remote>:]<network> [--target <node>]\n"
 "    Show details of a network.\n"
 "\n"
-"lxc network create [<remote>:]<network> [key=value...]\n"
+"lxc network create [<remote>:]<network> [key=value...] [--target <node>]\n"
 "    Create a network.\n"
 "\n"
-"lxc network get [<remote>:]<network> <key>\n"
+"lxc network get [<remote>:]<network> <key> [--target <node>]\n"
 "    Get network configuration.\n"
 "\n"
 "lxc network set [<remote>:]<network> <key> <value>\n"
@@ -2046,7 +2114,7 @@ msgid ""
 "    Create a snapshot of \"u1\" called \"snap0\"."
 msgstr ""
 
-#: lxc/storage.go:65
+#: lxc/storage.go:66
 msgid ""
 "Usage: lxc storage <subcommand> [options]\n"
 "\n"
@@ -2056,13 +2124,14 @@ msgid ""
 "lxc storage list [<remote>:]\n"
 "    List available storage pools.\n"
 "\n"
-"lxc storage show [<remote>:]<pool> [--resources]\n"
+"lxc storage show [<remote>:]<pool> [--resources] [--target <node>]\n"
 "    Show details of a storage pool.\n"
 "\n"
 "lxc storage info [<remote>:]<pool> [--bytes]\n"
 "    Show information of a storage pool in yaml format.\n"
 "\n"
-"lxc storage create [<remote>:]<pool> <driver> [key=value]...\n"
+"lxc storage create [<remote>:]<pool> <driver> [key=value]... [--target "
+"<node>]\n"
 "    Create a storage pool.\n"
 "\n"
 "lxc storage get [<remote>:]<pool> <key>\n"
@@ -2085,28 +2154,31 @@ msgid ""
 "lxc storage volume list [<remote>:]<pool>\n"
 "    List available storage volumes on a storage pool.\n"
 "\n"
-"lxc storage volume show [<remote>:]<pool> <volume>\n"
-"    Show details of a storage volume on a storage pool.\n"
+"lxc storage volume show [<remote>:]<pool> <volume> [--target <node>]\n"
+"   Show details of a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume create [<remote>:]<pool> <volume> [key=value]...\n"
+"lxc storage volume create [<remote>:]<pool> <volume> [key=value]... [--"
+"target <node>]\n"
 "    Create a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume rename [<remote>:]<pool> <old name> <new name>\n"
+"lxc storage volume rename [<remote>:]<pool> <old name> <new name> [--target "
+"<node>]\n"
 "    Rename a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume get [<remote>:]<pool> <volume> <key>\n"
+"lxc storage volume get [<remote>:]<pool> <volume> <key> [--target <node>]\n"
 "    Get storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume set [<remote>:]<pool> <volume> <key> <value>\n"
+"lxc storage volume set [<remote>:]<pool> <volume> <key> <value> [--target "
+"<node>]\n"
 "    Set storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume unset [<remote>:]<pool> <volume> <key>\n"
+"lxc storage volume unset [<remote>:]<pool> <volume> <key> [--target <node>]\n"
 "    Unset storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume delete [<remote>:]<pool> <volume>\n"
+"lxc storage volume delete [<remote>:]<pool> <volume> [--target <node>]\n"
 "    Delete a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume edit [<remote>:]<pool> <volume>\n"
+"lxc storage volume edit [<remote>:]<pool> <volume> [--target <node>]\n"
 "    Edit storage volume, either by launching external editor or reading "
 "STDIN.\n"
 "\n"
@@ -2156,7 +2228,7 @@ msgstr ""
 msgid "User aborted delete operation."
 msgstr ""
 
-#: lxc/utils.go:371
+#: lxc/utils.go:372
 msgid ""
 "User signaled us three times, exiting. The remote operation will keep "
 "running."
@@ -2176,7 +2248,7 @@ msgstr ""
 msgid "Whether or not to snapshot the container's running state"
 msgstr ""
 
-#: lxc/network.go:510 lxc/operation.go:141 lxc/remote.go:382 lxc/remote.go:387
+#: lxc/network.go:528 lxc/operation.go:141 lxc/remote.go:382 lxc/remote.go:387
 msgid "YES"
 msgstr ""
 
@@ -2208,11 +2280,11 @@ msgstr ""
 msgid "default"
 msgstr ""
 
-#: lxc/storage.go:804
+#: lxc/storage.go:835
 msgid "description"
 msgstr ""
 
-#: lxc/init.go:311
+#: lxc/init.go:314
 msgid "didn't get any affected image, container or snapshot from server"
 msgstr ""
 
@@ -2220,7 +2292,7 @@ msgstr ""
 msgid "disabled"
 msgstr ""
 
-#: lxc/storage.go:803
+#: lxc/storage.go:834
 msgid "driver"
 msgstr ""
 
@@ -2238,11 +2310,11 @@ msgstr ""
 msgid "error: unknown command: %s"
 msgstr ""
 
-#: lxc/storage.go:801
+#: lxc/storage.go:832
 msgid "info"
 msgstr ""
 
-#: lxc/storage.go:802
+#: lxc/storage.go:833
 msgid "name"
 msgstr ""
 
@@ -2254,7 +2326,7 @@ msgstr ""
 msgid "ok (y/n)?"
 msgstr ""
 
-#: lxc/main.go:366 lxc/main.go:370
+#: lxc/main.go:367 lxc/main.go:371
 #, c-format
 msgid "processing aliases failed %s\n"
 msgstr ""
@@ -2283,32 +2355,32 @@ msgstr ""
 msgid "remote %s is static and cannot be modified"
 msgstr ""
 
-#: lxc/storage.go:806
+#: lxc/storage.go:837
 msgid "space used"
 msgstr ""
 
-#: lxc/info.go:248
+#: lxc/info.go:251
 msgid "stateful"
 msgstr ""
 
-#: lxc/info.go:250
+#: lxc/info.go:253
 msgid "stateless"
 msgstr ""
 
-#: lxc/info.go:244
+#: lxc/info.go:247
 #, c-format
 msgid "taken at %s"
 msgstr ""
 
-#: lxc/storage.go:805
+#: lxc/storage.go:836
 msgid "total space"
 msgstr ""
 
-#: lxc/storage.go:800
+#: lxc/storage.go:831
 msgid "used by"
 msgstr ""
 
-#: lxc/main.go:297
+#: lxc/main.go:298
 msgid "wrong number of subcommand arguments"
 msgstr ""
 
diff --git a/po/es.po b/po/es.po
index 153806fa1..a87d94176 100644
--- a/po/es.po
+++ b/po/es.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2018-01-21 21:48+0000\n"
+"POT-Creation-Date: 2018-01-24 13:34+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -16,7 +16,7 @@ msgstr ""
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
 
-#: lxc/storage.go:34
+#: lxc/storage.go:35
 msgid ""
 "### This is a yaml representation of a storage pool.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -33,7 +33,7 @@ msgid ""
 "###   zfs.pool_name: default"
 msgstr ""
 
-#: lxc/storage.go:51
+#: lxc/storage.go:52
 msgid ""
 "### This is a yaml representation of a storage volume.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -102,7 +102,7 @@ msgid ""
 "###  description: My custom image"
 msgstr ""
 
-#: lxc/network.go:30
+#: lxc/network.go:32
 msgid ""
 "### This is a yaml representation of the network.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -153,7 +153,7 @@ msgstr ""
 msgid "%s is not a directory"
 msgstr ""
 
-#: lxc/utils.go:375
+#: lxc/utils.go:376
 #, c-format
 msgid "%v (interrupt two more times to force)"
 msgstr ""
@@ -179,7 +179,7 @@ msgstr ""
 msgid "ARCH"
 msgstr ""
 
-#: lxc/list.go:463
+#: lxc/list.go:467
 msgid "ARCHITECTURE"
 msgstr ""
 
@@ -204,7 +204,7 @@ msgstr ""
 msgid "Aliases:"
 msgstr ""
 
-#: lxc/image.go:600 lxc/info.go:122
+#: lxc/image.go:600 lxc/info.go:125
 #, c-format
 msgid "Architecture: %s"
 msgstr ""
@@ -228,11 +228,11 @@ msgstr ""
 msgid "Both --all and container name given"
 msgstr ""
 
-#: lxc/info.go:215
+#: lxc/info.go:218
 msgid "Bytes received"
 msgstr ""
 
-#: lxc/info.go:216
+#: lxc/info.go:219
 msgid "Bytes sent"
 msgstr ""
 
@@ -244,11 +244,11 @@ msgstr ""
 msgid "COMMON NAME"
 msgstr ""
 
-#: lxc/info.go:179
+#: lxc/info.go:182
 msgid "CPU usage (in seconds)"
 msgstr ""
 
-#: lxc/info.go:183
+#: lxc/info.go:186
 msgid "CPU usage:"
 msgstr ""
 
@@ -256,7 +256,7 @@ msgstr ""
 msgid "CREATED"
 msgstr ""
 
-#: lxc/list.go:464
+#: lxc/list.go:468
 msgid "CREATED AT"
 msgstr ""
 
@@ -269,7 +269,7 @@ msgstr ""
 msgid "Can't pull a directory without --recursive"
 msgstr ""
 
-#: lxc/config.go:207 lxc/network.go:560
+#: lxc/config.go:207 lxc/network.go:587
 #, c-format
 msgid "Can't read from stdin: %s"
 msgstr ""
@@ -288,7 +288,7 @@ msgstr ""
 msgid "Can't unset key '%s', it's not currently set."
 msgstr ""
 
-#: lxc/profile.go:546 lxc/storage.go:663
+#: lxc/profile.go:546 lxc/storage.go:675
 msgid "Cannot provide container name to list"
 msgstr ""
 
@@ -301,7 +301,7 @@ msgstr ""
 msgid "Client certificate stored at server: "
 msgstr ""
 
-#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:130 lxc/list.go:131
+#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:134 lxc/list.go:135
 msgid "Columns"
 msgstr ""
 
@@ -309,12 +309,12 @@ msgstr ""
 msgid "Commands:"
 msgstr ""
 
-#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:137 lxc/init.go:138
+#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:138 lxc/init.go:139
 msgid "Config key/value to apply to the new container"
 msgstr ""
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:614 lxc/storage.go:1067
+#: lxc/network.go:440 lxc/profile.go:275 lxc/storage.go:626 lxc/storage.go:1138
 #, c-format
 msgid "Config parsing error: %s"
 msgstr ""
@@ -331,7 +331,7 @@ msgstr ""
 msgid "Container name is mandatory"
 msgstr ""
 
-#: lxc/copy.go:229 lxc/init.go:317
+#: lxc/copy.go:229 lxc/init.go:320
 #, c-format
 msgid "Container name is: %s"
 msgstr ""
@@ -366,26 +366,30 @@ msgstr ""
 msgid "Create any directories necessary"
 msgstr ""
 
-#: lxc/image.go:605 lxc/info.go:124
+#: lxc/image.go:605 lxc/info.go:127
 #, c-format
 msgid "Created: %s"
 msgstr ""
 
-#: lxc/init.go:196
+#: lxc/init.go:199
 #, c-format
 msgid "Creating %s"
 msgstr ""
 
-#: lxc/init.go:194
+#: lxc/init.go:197
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:465 lxc/network.go:525
-#: lxc/storage.go:692 lxc/storage.go:881
+#: lxc/cluster.go:184
+msgid "DATABASE"
+msgstr ""
+
+#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:469 lxc/network.go:547
+#: lxc/storage.go:711 lxc/storage.go:916
 msgid "DESCRIPTION"
 msgstr ""
 
-#: lxc/storage.go:693
+#: lxc/storage.go:712
 msgid "DRIVER"
 msgstr ""
 
@@ -403,7 +407,7 @@ msgstr ""
 msgid "Device %s removed from %s"
 msgstr ""
 
-#: lxc/utils.go:307 lxc/utils.go:331
+#: lxc/utils.go:308 lxc/utils.go:332
 #, c-format
 msgid "Device already exists: %s"
 msgstr ""
@@ -420,11 +424,11 @@ msgstr ""
 msgid "Disable stdin (reads from /dev/null)"
 msgstr ""
 
-#: lxc/info.go:172
+#: lxc/info.go:175
 msgid "Disk usage:"
 msgstr ""
 
-#: lxc/list.go:622
+#: lxc/list.go:638
 msgid "EPHEMERAL"
 msgstr ""
 
@@ -448,7 +452,7 @@ msgstr ""
 msgid "Environment:"
 msgstr ""
 
-#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:141 lxc/init.go:142
+#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:142 lxc/init.go:143
 msgid "Ephemeral container"
 msgstr ""
 
@@ -483,7 +487,7 @@ msgstr ""
 msgid "FINGERPRINT"
 msgstr ""
 
-#: lxc/utils.go:413
+#: lxc/utils.go:414
 #, c-format
 msgid "Failed to create alias %s"
 msgstr ""
@@ -502,7 +506,7 @@ msgstr ""
 msgid "Failed to get the new container name"
 msgstr ""
 
-#: lxc/utils.go:403
+#: lxc/utils.go:404
 #, c-format
 msgid "Failed to remove alias %s"
 msgstr ""
@@ -512,11 +516,11 @@ msgstr ""
 msgid "Failed to walk path for %s: %s"
 msgstr ""
 
-#: lxc/list.go:133
+#: lxc/list.go:137
 msgid "Fast mode (same as --columns=nsacPt)"
 msgstr ""
 
-#: lxc/network.go:486 lxc/operation.go:121
+#: lxc/network.go:504 lxc/operation.go:121
 msgid "Filtering isn't supported yet"
 msgstr ""
 
@@ -529,6 +533,10 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
+#: lxc/cluster.go:41
+msgid "Force removing a node, even if degraded"
+msgstr ""
+
 #: lxc/action.go:47 lxc/action.go:48
 msgid "Force the container to shutdown"
 msgstr ""
@@ -541,7 +549,7 @@ msgstr ""
 msgid "Force using the local unix socket"
 msgstr ""
 
-#: lxc/image.go:180 lxc/list.go:132
+#: lxc/image.go:180 lxc/list.go:136
 msgid "Format (csv|json|table|yaml)"
 msgstr ""
 
@@ -553,11 +561,11 @@ msgstr ""
 msgid "ID"
 msgstr ""
 
-#: lxc/list.go:461
+#: lxc/list.go:465
 msgid "IPV4"
 msgstr ""
 
-#: lxc/list.go:462
+#: lxc/list.go:466
 msgid "IPV6"
 msgstr ""
 
@@ -604,7 +612,7 @@ msgstr ""
 msgid "Input data"
 msgstr ""
 
-#: lxc/init.go:147
+#: lxc/init.go:148
 msgid "Instance type"
 msgstr ""
 
@@ -641,7 +649,7 @@ msgstr ""
 msgid "Invalid target %s"
 msgstr ""
 
-#: lxc/info.go:153
+#: lxc/info.go:156
 msgid "Ips:"
 msgstr ""
 
@@ -649,7 +657,7 @@ msgstr ""
 msgid "Keep the image up to date after initial copy"
 msgstr ""
 
-#: lxc/list.go:466
+#: lxc/list.go:470
 msgid "LAST USED AT"
 msgstr ""
 
@@ -666,14 +674,18 @@ msgstr ""
 msgid "Last used: never"
 msgstr ""
 
-#: lxc/info.go:268
+#: lxc/info.go:271
 msgid "Log:"
 msgstr ""
 
-#: lxc/network.go:524
+#: lxc/network.go:546
 msgid "MANAGED"
 msgstr ""
 
+#: lxc/cluster.go:186
+msgid "MESSAGE"
+msgstr ""
+
 #: lxc/image.go:176
 msgid "Make image public"
 msgstr ""
@@ -682,23 +694,23 @@ msgstr ""
 msgid "Make the image public"
 msgstr ""
 
-#: lxc/info.go:190
+#: lxc/info.go:193
 msgid "Memory (current)"
 msgstr ""
 
-#: lxc/info.go:194
+#: lxc/info.go:197
 msgid "Memory (peak)"
 msgstr ""
 
-#: lxc/info.go:206
+#: lxc/info.go:209
 msgid "Memory usage:"
 msgstr ""
 
-#: lxc/utils.go:258
+#: lxc/utils.go:259
 msgid "Missing summary."
 msgstr ""
 
-#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:412 lxc/storage.go:532
+#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:414 lxc/storage.go:544
 msgid "More than one device matches, specify the device name."
 msgstr ""
 
@@ -718,40 +730,49 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/list.go:467 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409
-#: lxc/storage.go:691 lxc/storage.go:880
+#: lxc/cluster.go:182 lxc/list.go:471 lxc/network.go:544 lxc/profile.go:573
+#: lxc/remote.go:409 lxc/storage.go:710 lxc/storage.go:915
 msgid "NAME"
 msgstr ""
 
-#: lxc/network.go:508 lxc/operation.go:139 lxc/remote.go:380 lxc/remote.go:385
+#: lxc/network.go:526 lxc/operation.go:139 lxc/remote.go:380 lxc/remote.go:385
 msgid "NO"
 msgstr ""
 
+#: lxc/list.go:492 lxc/storage.go:920
+msgid "NODE"
+msgstr ""
+
 #: lxc/info.go:117
 #, c-format
 msgid "Name: %s"
 msgstr ""
 
-#: lxc/network.go:258
+#: lxc/network.go:271
 #, c-format
 msgid "Network %s created"
 msgstr ""
 
-#: lxc/network.go:374
+#: lxc/network.go:388
 #, c-format
 msgid "Network %s deleted"
 msgstr ""
 
-#: lxc/network.go:451
+#: lxc/network.go:269
+#, c-format
+msgid "Network %s pending on node %s"
+msgstr ""
+
+#: lxc/network.go:465
 #, c-format
 msgid "Network %s renamed to %s"
 msgstr ""
 
-#: lxc/init.go:143 lxc/init.go:144
+#: lxc/init.go:144 lxc/init.go:145
 msgid "Network name"
 msgstr ""
 
-#: lxc/info.go:223
+#: lxc/info.go:226
 msgid "Network usage:"
 msgstr ""
 
@@ -763,11 +784,11 @@ msgstr ""
 msgid "No certificate provided to add"
 msgstr ""
 
-#: lxc/network.go:293 lxc/network.go:346
+#: lxc/network.go:307 lxc/network.go:360
 msgid "No device found for this network"
 msgstr ""
 
-#: lxc/storage.go:421 lxc/storage.go:541
+#: lxc/storage.go:423 lxc/storage.go:553
 msgid "No device found for this storage volume."
 msgstr ""
 
@@ -775,7 +796,26 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/storage.go:365 lxc/storage.go:458
+#: lxc/cluster.go:142
+#, c-format
+msgid "Node %s removed"
+msgstr ""
+
+#: lxc/cluster.go:118
+#, c-format
+msgid "Node %s renamed to %s"
+msgstr ""
+
+#: lxc/init.go:149 lxc/network.go:102 lxc/storage.go:158
+msgid "Node name"
+msgstr ""
+
+#: lxc/info.go:119
+#, c-format
+msgid "Node: %s"
+msgstr ""
+
+#: lxc/storage.go:367 lxc/storage.go:460
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
 
@@ -787,7 +827,7 @@ msgstr ""
 msgid "Only https:// is supported for remote image import."
 msgstr ""
 
-#: lxc/network.go:402 lxc/network.go:546
+#: lxc/network.go:416 lxc/network.go:573
 msgid "Only managed networks can be modified."
 msgstr ""
 
@@ -804,19 +844,19 @@ msgstr ""
 msgid "Override the terminal mode (auto, interactive or non-interactive)"
 msgstr ""
 
-#: lxc/list.go:624
+#: lxc/list.go:640
 msgid "PERSISTENT"
 msgstr ""
 
-#: lxc/list.go:469
+#: lxc/list.go:473
 msgid "PID"
 msgstr ""
 
-#: lxc/list.go:468
+#: lxc/list.go:472
 msgid "PROCESSES"
 msgstr ""
 
-#: lxc/list.go:470
+#: lxc/list.go:474
 msgid "PROFILES"
 msgstr ""
 
@@ -828,11 +868,11 @@ msgstr ""
 msgid "PUBLIC"
 msgstr ""
 
-#: lxc/info.go:217
+#: lxc/info.go:220
 msgid "Packets received"
 msgstr ""
 
-#: lxc/info.go:218
+#: lxc/info.go:221
 msgid "Packets sent"
 msgstr ""
 
@@ -844,7 +884,7 @@ msgstr ""
 msgid "Path to an alternate server directory"
 msgstr ""
 
-#: lxc/main.go:238
+#: lxc/main.go:239
 msgid "Pause containers."
 msgstr ""
 
@@ -852,12 +892,12 @@ msgstr ""
 msgid "Permission denied, are you in the lxd group?"
 msgstr ""
 
-#: lxc/info.go:135
+#: lxc/info.go:138
 #, c-format
 msgid "Pid: %d"
 msgstr ""
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:615 lxc/storage.go:1068
+#: lxc/network.go:441 lxc/profile.go:276 lxc/storage.go:627 lxc/storage.go:1139
 msgid "Press enter to open the editor again"
 msgstr ""
 
@@ -882,7 +922,7 @@ msgstr ""
 msgid "Print verbose information"
 msgstr ""
 
-#: lxc/info.go:159
+#: lxc/info.go:162
 #, c-format
 msgid "Processes: %d"
 msgstr ""
@@ -912,7 +952,7 @@ msgstr ""
 msgid "Profile %s renamed to %s"
 msgstr ""
 
-#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:139 lxc/init.go:140
+#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:140 lxc/init.go:141
 msgid "Profile to apply to the new container"
 msgstr ""
 
@@ -921,7 +961,7 @@ msgstr ""
 msgid "Profiles %s applied to %s"
 msgstr ""
 
-#: lxc/info.go:133
+#: lxc/info.go:136
 #, c-format
 msgid "Profiles: %s"
 msgstr ""
@@ -952,11 +992,11 @@ msgstr ""
 msgid "Remote admin password"
 msgstr ""
 
-#: lxc/utils.go:366
+#: lxc/utils.go:367
 msgid "Remote operation canceled by user"
 msgstr ""
 
-#: lxc/info.go:119
+#: lxc/info.go:122
 #, c-format
 msgid "Remote: %s"
 msgstr ""
@@ -966,7 +1006,7 @@ msgstr ""
 msgid "Remove %s (yes/no): "
 msgstr ""
 
-#: lxc/storage.go:1099
+#: lxc/storage.go:1176
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr ""
@@ -975,11 +1015,11 @@ msgstr ""
 msgid "Require user confirmation"
 msgstr ""
 
-#: lxc/info.go:156
+#: lxc/info.go:159
 msgid "Resources:"
 msgstr ""
 
-#: lxc/main.go:246
+#: lxc/main.go:247
 msgid "Restart containers."
 msgstr ""
 
@@ -987,7 +1027,7 @@ msgstr ""
 msgid "Retrieve the container's console log"
 msgstr ""
 
-#: lxc/init.go:289
+#: lxc/init.go:292
 #, c-format
 msgid "Retrieving image: %s"
 msgstr ""
@@ -1000,15 +1040,15 @@ msgstr ""
 msgid "SIZE"
 msgstr ""
 
-#: lxc/list.go:471
+#: lxc/list.go:475
 msgid "SNAPSHOTS"
 msgstr ""
 
-#: lxc/storage.go:694
+#: lxc/storage.go:717
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/list.go:472
+#: lxc/cluster.go:185 lxc/list.go:476 lxc/network.go:551 lxc/storage.go:715
 msgid "STATE"
 msgstr ""
 
@@ -1020,7 +1060,7 @@ msgstr ""
 msgid "STATUS"
 msgstr ""
 
-#: lxc/list.go:474
+#: lxc/list.go:478
 msgid "STORAGE POOL"
 msgstr ""
 
@@ -1072,11 +1112,11 @@ msgstr ""
 msgid "Show the resources available to the server"
 msgstr ""
 
-#: lxc/storage.go:155
+#: lxc/storage.go:156
 msgid "Show the resources available to the storage pool"
 msgstr ""
 
-#: lxc/storage.go:156
+#: lxc/storage.go:157
 msgid "Show the used and free space in bytes"
 msgstr ""
 
@@ -1085,7 +1125,7 @@ msgstr ""
 msgid "Size: %.2fMB"
 msgstr ""
 
-#: lxc/info.go:237
+#: lxc/info.go:240
 msgid "Snapshots:"
 msgstr ""
 
@@ -1098,7 +1138,7 @@ msgstr ""
 msgid "Source:"
 msgstr ""
 
-#: lxc/main.go:256
+#: lxc/main.go:257
 msgid "Start containers."
 msgstr ""
 
@@ -1107,12 +1147,12 @@ msgstr ""
 msgid "Starting %s"
 msgstr ""
 
-#: lxc/info.go:127
+#: lxc/info.go:130
 #, c-format
 msgid "Status: %s"
 msgstr ""
 
-#: lxc/main.go:262
+#: lxc/main.go:263
 msgid "Stop containers."
 msgstr ""
 
@@ -1129,26 +1169,31 @@ msgstr ""
 msgid "Stopping the container failed: %s"
 msgstr ""
 
-#: lxc/storage.go:506
+#: lxc/storage.go:517
 #, c-format
 msgid "Storage pool %s created"
 msgstr ""
 
-#: lxc/storage.go:565
+#: lxc/storage.go:577
 #, c-format
 msgid "Storage pool %s deleted"
 msgstr ""
 
-#: lxc/init.go:145 lxc/init.go:146
+#: lxc/storage.go:515
+#, c-format
+msgid "Storage pool %s pending on node %s"
+msgstr ""
+
+#: lxc/init.go:146 lxc/init.go:147
 msgid "Storage pool name"
 msgstr ""
 
-#: lxc/storage.go:914
+#: lxc/storage.go:959
 #, c-format
 msgid "Storage volume %s created"
 msgstr ""
 
-#: lxc/storage.go:929
+#: lxc/storage.go:979
 #, c-format
 msgid "Storage volume %s deleted"
 msgstr ""
@@ -1157,15 +1202,15 @@ msgstr ""
 msgid "Store the container state (only for stop)"
 msgstr ""
 
-#: lxc/info.go:198
+#: lxc/info.go:201
 msgid "Swap (current)"
 msgstr ""
 
-#: lxc/info.go:202
+#: lxc/info.go:205
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:473 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:879
+#: lxc/list.go:477 lxc/network.go:545 lxc/operation.go:153 lxc/storage.go:914
 msgid "TYPE"
 msgstr ""
 
@@ -1179,7 +1224,7 @@ msgid ""
 "restarted."
 msgstr ""
 
-#: lxc/init.go:362
+#: lxc/init.go:365
 msgid "The container you are starting doesn't have any network attached to it."
 msgstr ""
 
@@ -1192,7 +1237,7 @@ msgstr ""
 msgid "The device doesn't exist"
 msgstr ""
 
-#: lxc/init.go:346
+#: lxc/init.go:349
 #, c-format
 msgid "The local image '%s' couldn't be found, trying '%s:' instead."
 msgstr ""
@@ -1201,11 +1246,11 @@ msgstr ""
 msgid "The opposite of \"lxc pause\" is \"lxc start\"."
 msgstr ""
 
-#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:426 lxc/storage.go:546
+#: lxc/network.go:312 lxc/network.go:365 lxc/storage.go:428 lxc/storage.go:558
 msgid "The specified device doesn't exist"
 msgstr ""
 
-#: lxc/network.go:302 lxc/network.go:355
+#: lxc/network.go:316 lxc/network.go:369
 msgid "The specified device doesn't match the network"
 msgstr ""
 
@@ -1229,11 +1274,11 @@ msgstr ""
 msgid "Timestamps:"
 msgstr ""
 
-#: lxc/init.go:364
+#: lxc/init.go:367
 msgid "To attach a network to a container, use: lxc network attach"
 msgstr ""
 
-#: lxc/init.go:363
+#: lxc/init.go:366
 msgid "To create a new network, use: lxc network create"
 msgstr ""
 
@@ -1264,11 +1309,11 @@ msgstr ""
 msgid "Try `lxc info --show-log %s` for more info"
 msgstr ""
 
-#: lxc/info.go:129
+#: lxc/info.go:132
 msgid "Type: ephemeral"
 msgstr ""
 
-#: lxc/info.go:131
+#: lxc/info.go:134
 msgid "Type: persistent"
 msgstr ""
 
@@ -1276,11 +1321,11 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/remote.go:410
+#: lxc/cluster.go:183 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:695 lxc/storage.go:882
+#: lxc/network.go:548 lxc/profile.go:574 lxc/storage.go:719 lxc/storage.go:917
 msgid "USED BY"
 msgstr ""
 
@@ -1310,6 +1355,25 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
+#: lxc/cluster.go:22
+msgid ""
+"Usage: lxc cluster <subcommand> [options]\n"
+"\n"
+"Manage cluster nodes.\n"
+"\n"
+"lxc cluster list [<remote>:]\n"
+"    List all nodes in the cluster.\n"
+"\n"
+"lxc cluster show [<remote>:]<node>\n"
+"    Show details of a node.\n"
+"\n"
+"lxc cluster rename [<remote>:]<node> <new-name>\n"
+"    Rename a cluster node.\n"
+"\n"
+"lxc cluster delete [<remote>:]<node> [--force]\n"
+"    Delete a node from the cluster."
+msgstr ""
+
 #: lxc/config.go:85
 msgid ""
 "Usage: lxc config <subcommand> [options]\n"
@@ -1626,11 +1690,12 @@ msgid ""
 "    For LXD server information."
 msgstr ""
 
-#: lxc/init.go:77
+#: lxc/init.go:78
 msgid ""
 "Usage: lxc init [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create containers from images.\n"
 "\n"
@@ -1645,7 +1710,8 @@ msgstr ""
 msgid ""
 "Usage: lxc launch [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create and start containers from images.\n"
 "\n"
@@ -1726,6 +1792,8 @@ msgid ""
 "\n"
 "\tt - Type (persistent or ephemeral)\n"
 "\n"
+"\tH - Node hosting the container\n"
+"\n"
 "Custom columns are defined with \"key[:name][:maxWidth]\":\n"
 "\n"
 "\tKEY: The (extended) config key to display\n"
@@ -1791,7 +1859,7 @@ msgid ""
 "    Rename a snapshot."
 msgstr ""
 
-#: lxc/network.go:50
+#: lxc/network.go:52
 msgid ""
 "Usage: lxc network <subcommand> [options]\n"
 "\n"
@@ -1800,13 +1868,13 @@ msgid ""
 "lxc network list [<remote>:]\n"
 "    List available networks.\n"
 "\n"
-"lxc network show [<remote>:]<network>\n"
+"lxc network show [<remote>:]<network> [--target <node>]\n"
 "    Show details of a network.\n"
 "\n"
-"lxc network create [<remote>:]<network> [key=value...]\n"
+"lxc network create [<remote>:]<network> [key=value...] [--target <node>]\n"
 "    Create a network.\n"
 "\n"
-"lxc network get [<remote>:]<network> <key>\n"
+"lxc network get [<remote>:]<network> <key> [--target <node>]\n"
 "    Get network configuration.\n"
 "\n"
 "lxc network set [<remote>:]<network> <key> <value>\n"
@@ -2039,7 +2107,7 @@ msgid ""
 "    Create a snapshot of \"u1\" called \"snap0\"."
 msgstr ""
 
-#: lxc/storage.go:65
+#: lxc/storage.go:66
 msgid ""
 "Usage: lxc storage <subcommand> [options]\n"
 "\n"
@@ -2049,13 +2117,14 @@ msgid ""
 "lxc storage list [<remote>:]\n"
 "    List available storage pools.\n"
 "\n"
-"lxc storage show [<remote>:]<pool> [--resources]\n"
+"lxc storage show [<remote>:]<pool> [--resources] [--target <node>]\n"
 "    Show details of a storage pool.\n"
 "\n"
 "lxc storage info [<remote>:]<pool> [--bytes]\n"
 "    Show information of a storage pool in yaml format.\n"
 "\n"
-"lxc storage create [<remote>:]<pool> <driver> [key=value]...\n"
+"lxc storage create [<remote>:]<pool> <driver> [key=value]... [--target "
+"<node>]\n"
 "    Create a storage pool.\n"
 "\n"
 "lxc storage get [<remote>:]<pool> <key>\n"
@@ -2078,28 +2147,31 @@ msgid ""
 "lxc storage volume list [<remote>:]<pool>\n"
 "    List available storage volumes on a storage pool.\n"
 "\n"
-"lxc storage volume show [<remote>:]<pool> <volume>\n"
-"    Show details of a storage volume on a storage pool.\n"
+"lxc storage volume show [<remote>:]<pool> <volume> [--target <node>]\n"
+"   Show details of a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume create [<remote>:]<pool> <volume> [key=value]...\n"
+"lxc storage volume create [<remote>:]<pool> <volume> [key=value]... [--"
+"target <node>]\n"
 "    Create a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume rename [<remote>:]<pool> <old name> <new name>\n"
+"lxc storage volume rename [<remote>:]<pool> <old name> <new name> [--target "
+"<node>]\n"
 "    Rename a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume get [<remote>:]<pool> <volume> <key>\n"
+"lxc storage volume get [<remote>:]<pool> <volume> <key> [--target <node>]\n"
 "    Get storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume set [<remote>:]<pool> <volume> <key> <value>\n"
+"lxc storage volume set [<remote>:]<pool> <volume> <key> <value> [--target "
+"<node>]\n"
 "    Set storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume unset [<remote>:]<pool> <volume> <key>\n"
+"lxc storage volume unset [<remote>:]<pool> <volume> <key> [--target <node>]\n"
 "    Unset storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume delete [<remote>:]<pool> <volume>\n"
+"lxc storage volume delete [<remote>:]<pool> <volume> [--target <node>]\n"
 "    Delete a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume edit [<remote>:]<pool> <volume>\n"
+"lxc storage volume edit [<remote>:]<pool> <volume> [--target <node>]\n"
 "    Edit storage volume, either by launching external editor or reading "
 "STDIN.\n"
 "\n"
@@ -2149,7 +2221,7 @@ msgstr ""
 msgid "User aborted delete operation."
 msgstr ""
 
-#: lxc/utils.go:371
+#: lxc/utils.go:372
 msgid ""
 "User signaled us three times, exiting. The remote operation will keep "
 "running."
@@ -2169,7 +2241,7 @@ msgstr ""
 msgid "Whether or not to snapshot the container's running state"
 msgstr ""
 
-#: lxc/network.go:510 lxc/operation.go:141 lxc/remote.go:382 lxc/remote.go:387
+#: lxc/network.go:528 lxc/operation.go:141 lxc/remote.go:382 lxc/remote.go:387
 msgid "YES"
 msgstr ""
 
@@ -2201,11 +2273,11 @@ msgstr ""
 msgid "default"
 msgstr ""
 
-#: lxc/storage.go:804
+#: lxc/storage.go:835
 msgid "description"
 msgstr ""
 
-#: lxc/init.go:311
+#: lxc/init.go:314
 msgid "didn't get any affected image, container or snapshot from server"
 msgstr ""
 
@@ -2213,7 +2285,7 @@ msgstr ""
 msgid "disabled"
 msgstr ""
 
-#: lxc/storage.go:803
+#: lxc/storage.go:834
 msgid "driver"
 msgstr ""
 
@@ -2231,11 +2303,11 @@ msgstr ""
 msgid "error: unknown command: %s"
 msgstr ""
 
-#: lxc/storage.go:801
+#: lxc/storage.go:832
 msgid "info"
 msgstr ""
 
-#: lxc/storage.go:802
+#: lxc/storage.go:833
 msgid "name"
 msgstr ""
 
@@ -2247,7 +2319,7 @@ msgstr ""
 msgid "ok (y/n)?"
 msgstr ""
 
-#: lxc/main.go:366 lxc/main.go:370
+#: lxc/main.go:367 lxc/main.go:371
 #, c-format
 msgid "processing aliases failed %s\n"
 msgstr ""
@@ -2276,32 +2348,32 @@ msgstr ""
 msgid "remote %s is static and cannot be modified"
 msgstr ""
 
-#: lxc/storage.go:806
+#: lxc/storage.go:837
 msgid "space used"
 msgstr ""
 
-#: lxc/info.go:248
+#: lxc/info.go:251
 msgid "stateful"
 msgstr ""
 
-#: lxc/info.go:250
+#: lxc/info.go:253
 msgid "stateless"
 msgstr ""
 
-#: lxc/info.go:244
+#: lxc/info.go:247
 #, c-format
 msgid "taken at %s"
 msgstr ""
 
-#: lxc/storage.go:805
+#: lxc/storage.go:836
 msgid "total space"
 msgstr ""
 
-#: lxc/storage.go:800
+#: lxc/storage.go:831
 msgid "used by"
 msgstr ""
 
-#: lxc/main.go:297
+#: lxc/main.go:298
 msgid "wrong number of subcommand arguments"
 msgstr ""
 
diff --git a/po/fi.po b/po/fi.po
index 76fd54f0a..267be59d7 100644
--- a/po/fi.po
+++ b/po/fi.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2018-01-21 21:48+0000\n"
+"POT-Creation-Date: 2018-01-24 13:34+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -16,7 +16,7 @@ msgstr ""
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
 
-#: lxc/storage.go:34
+#: lxc/storage.go:35
 msgid ""
 "### This is a yaml representation of a storage pool.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -33,7 +33,7 @@ msgid ""
 "###   zfs.pool_name: default"
 msgstr ""
 
-#: lxc/storage.go:51
+#: lxc/storage.go:52
 msgid ""
 "### This is a yaml representation of a storage volume.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -102,7 +102,7 @@ msgid ""
 "###  description: My custom image"
 msgstr ""
 
-#: lxc/network.go:30
+#: lxc/network.go:32
 msgid ""
 "### This is a yaml representation of the network.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -153,7 +153,7 @@ msgstr ""
 msgid "%s is not a directory"
 msgstr ""
 
-#: lxc/utils.go:375
+#: lxc/utils.go:376
 #, c-format
 msgid "%v (interrupt two more times to force)"
 msgstr ""
@@ -179,7 +179,7 @@ msgstr ""
 msgid "ARCH"
 msgstr ""
 
-#: lxc/list.go:463
+#: lxc/list.go:467
 msgid "ARCHITECTURE"
 msgstr ""
 
@@ -204,7 +204,7 @@ msgstr ""
 msgid "Aliases:"
 msgstr ""
 
-#: lxc/image.go:600 lxc/info.go:122
+#: lxc/image.go:600 lxc/info.go:125
 #, c-format
 msgid "Architecture: %s"
 msgstr ""
@@ -228,11 +228,11 @@ msgstr ""
 msgid "Both --all and container name given"
 msgstr ""
 
-#: lxc/info.go:215
+#: lxc/info.go:218
 msgid "Bytes received"
 msgstr ""
 
-#: lxc/info.go:216
+#: lxc/info.go:219
 msgid "Bytes sent"
 msgstr ""
 
@@ -244,11 +244,11 @@ msgstr ""
 msgid "COMMON NAME"
 msgstr ""
 
-#: lxc/info.go:179
+#: lxc/info.go:182
 msgid "CPU usage (in seconds)"
 msgstr ""
 
-#: lxc/info.go:183
+#: lxc/info.go:186
 msgid "CPU usage:"
 msgstr ""
 
@@ -256,7 +256,7 @@ msgstr ""
 msgid "CREATED"
 msgstr ""
 
-#: lxc/list.go:464
+#: lxc/list.go:468
 msgid "CREATED AT"
 msgstr ""
 
@@ -269,7 +269,7 @@ msgstr ""
 msgid "Can't pull a directory without --recursive"
 msgstr ""
 
-#: lxc/config.go:207 lxc/network.go:560
+#: lxc/config.go:207 lxc/network.go:587
 #, c-format
 msgid "Can't read from stdin: %s"
 msgstr ""
@@ -288,7 +288,7 @@ msgstr ""
 msgid "Can't unset key '%s', it's not currently set."
 msgstr ""
 
-#: lxc/profile.go:546 lxc/storage.go:663
+#: lxc/profile.go:546 lxc/storage.go:675
 msgid "Cannot provide container name to list"
 msgstr ""
 
@@ -301,7 +301,7 @@ msgstr ""
 msgid "Client certificate stored at server: "
 msgstr ""
 
-#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:130 lxc/list.go:131
+#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:134 lxc/list.go:135
 msgid "Columns"
 msgstr ""
 
@@ -309,12 +309,12 @@ msgstr ""
 msgid "Commands:"
 msgstr ""
 
-#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:137 lxc/init.go:138
+#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:138 lxc/init.go:139
 msgid "Config key/value to apply to the new container"
 msgstr ""
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:614 lxc/storage.go:1067
+#: lxc/network.go:440 lxc/profile.go:275 lxc/storage.go:626 lxc/storage.go:1138
 #, c-format
 msgid "Config parsing error: %s"
 msgstr ""
@@ -331,7 +331,7 @@ msgstr ""
 msgid "Container name is mandatory"
 msgstr ""
 
-#: lxc/copy.go:229 lxc/init.go:317
+#: lxc/copy.go:229 lxc/init.go:320
 #, c-format
 msgid "Container name is: %s"
 msgstr ""
@@ -366,26 +366,30 @@ msgstr ""
 msgid "Create any directories necessary"
 msgstr ""
 
-#: lxc/image.go:605 lxc/info.go:124
+#: lxc/image.go:605 lxc/info.go:127
 #, c-format
 msgid "Created: %s"
 msgstr ""
 
-#: lxc/init.go:196
+#: lxc/init.go:199
 #, c-format
 msgid "Creating %s"
 msgstr ""
 
-#: lxc/init.go:194
+#: lxc/init.go:197
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:465 lxc/network.go:525
-#: lxc/storage.go:692 lxc/storage.go:881
+#: lxc/cluster.go:184
+msgid "DATABASE"
+msgstr ""
+
+#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:469 lxc/network.go:547
+#: lxc/storage.go:711 lxc/storage.go:916
 msgid "DESCRIPTION"
 msgstr ""
 
-#: lxc/storage.go:693
+#: lxc/storage.go:712
 msgid "DRIVER"
 msgstr ""
 
@@ -403,7 +407,7 @@ msgstr ""
 msgid "Device %s removed from %s"
 msgstr ""
 
-#: lxc/utils.go:307 lxc/utils.go:331
+#: lxc/utils.go:308 lxc/utils.go:332
 #, c-format
 msgid "Device already exists: %s"
 msgstr ""
@@ -420,11 +424,11 @@ msgstr ""
 msgid "Disable stdin (reads from /dev/null)"
 msgstr ""
 
-#: lxc/info.go:172
+#: lxc/info.go:175
 msgid "Disk usage:"
 msgstr ""
 
-#: lxc/list.go:622
+#: lxc/list.go:638
 msgid "EPHEMERAL"
 msgstr ""
 
@@ -448,7 +452,7 @@ msgstr ""
 msgid "Environment:"
 msgstr ""
 
-#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:141 lxc/init.go:142
+#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:142 lxc/init.go:143
 msgid "Ephemeral container"
 msgstr ""
 
@@ -483,7 +487,7 @@ msgstr ""
 msgid "FINGERPRINT"
 msgstr ""
 
-#: lxc/utils.go:413
+#: lxc/utils.go:414
 #, c-format
 msgid "Failed to create alias %s"
 msgstr ""
@@ -502,7 +506,7 @@ msgstr ""
 msgid "Failed to get the new container name"
 msgstr ""
 
-#: lxc/utils.go:403
+#: lxc/utils.go:404
 #, c-format
 msgid "Failed to remove alias %s"
 msgstr ""
@@ -512,11 +516,11 @@ msgstr ""
 msgid "Failed to walk path for %s: %s"
 msgstr ""
 
-#: lxc/list.go:133
+#: lxc/list.go:137
 msgid "Fast mode (same as --columns=nsacPt)"
 msgstr ""
 
-#: lxc/network.go:486 lxc/operation.go:121
+#: lxc/network.go:504 lxc/operation.go:121
 msgid "Filtering isn't supported yet"
 msgstr ""
 
@@ -529,6 +533,10 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
+#: lxc/cluster.go:41
+msgid "Force removing a node, even if degraded"
+msgstr ""
+
 #: lxc/action.go:47 lxc/action.go:48
 msgid "Force the container to shutdown"
 msgstr ""
@@ -541,7 +549,7 @@ msgstr ""
 msgid "Force using the local unix socket"
 msgstr ""
 
-#: lxc/image.go:180 lxc/list.go:132
+#: lxc/image.go:180 lxc/list.go:136
 msgid "Format (csv|json|table|yaml)"
 msgstr ""
 
@@ -553,11 +561,11 @@ msgstr ""
 msgid "ID"
 msgstr ""
 
-#: lxc/list.go:461
+#: lxc/list.go:465
 msgid "IPV4"
 msgstr ""
 
-#: lxc/list.go:462
+#: lxc/list.go:466
 msgid "IPV6"
 msgstr ""
 
@@ -604,7 +612,7 @@ msgstr ""
 msgid "Input data"
 msgstr ""
 
-#: lxc/init.go:147
+#: lxc/init.go:148
 msgid "Instance type"
 msgstr ""
 
@@ -641,7 +649,7 @@ msgstr ""
 msgid "Invalid target %s"
 msgstr ""
 
-#: lxc/info.go:153
+#: lxc/info.go:156
 msgid "Ips:"
 msgstr ""
 
@@ -649,7 +657,7 @@ msgstr ""
 msgid "Keep the image up to date after initial copy"
 msgstr ""
 
-#: lxc/list.go:466
+#: lxc/list.go:470
 msgid "LAST USED AT"
 msgstr ""
 
@@ -666,14 +674,18 @@ msgstr ""
 msgid "Last used: never"
 msgstr ""
 
-#: lxc/info.go:268
+#: lxc/info.go:271
 msgid "Log:"
 msgstr ""
 
-#: lxc/network.go:524
+#: lxc/network.go:546
 msgid "MANAGED"
 msgstr ""
 
+#: lxc/cluster.go:186
+msgid "MESSAGE"
+msgstr ""
+
 #: lxc/image.go:176
 msgid "Make image public"
 msgstr ""
@@ -682,23 +694,23 @@ msgstr ""
 msgid "Make the image public"
 msgstr ""
 
-#: lxc/info.go:190
+#: lxc/info.go:193
 msgid "Memory (current)"
 msgstr ""
 
-#: lxc/info.go:194
+#: lxc/info.go:197
 msgid "Memory (peak)"
 msgstr ""
 
-#: lxc/info.go:206
+#: lxc/info.go:209
 msgid "Memory usage:"
 msgstr ""
 
-#: lxc/utils.go:258
+#: lxc/utils.go:259
 msgid "Missing summary."
 msgstr ""
 
-#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:412 lxc/storage.go:532
+#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:414 lxc/storage.go:544
 msgid "More than one device matches, specify the device name."
 msgstr ""
 
@@ -718,40 +730,49 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/list.go:467 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409
-#: lxc/storage.go:691 lxc/storage.go:880
+#: lxc/cluster.go:182 lxc/list.go:471 lxc/network.go:544 lxc/profile.go:573
+#: lxc/remote.go:409 lxc/storage.go:710 lxc/storage.go:915
 msgid "NAME"
 msgstr ""
 
-#: lxc/network.go:508 lxc/operation.go:139 lxc/remote.go:380 lxc/remote.go:385
+#: lxc/network.go:526 lxc/operation.go:139 lxc/remote.go:380 lxc/remote.go:385
 msgid "NO"
 msgstr ""
 
+#: lxc/list.go:492 lxc/storage.go:920
+msgid "NODE"
+msgstr ""
+
 #: lxc/info.go:117
 #, c-format
 msgid "Name: %s"
 msgstr ""
 
-#: lxc/network.go:258
+#: lxc/network.go:271
 #, c-format
 msgid "Network %s created"
 msgstr ""
 
-#: lxc/network.go:374
+#: lxc/network.go:388
 #, c-format
 msgid "Network %s deleted"
 msgstr ""
 
-#: lxc/network.go:451
+#: lxc/network.go:269
+#, c-format
+msgid "Network %s pending on node %s"
+msgstr ""
+
+#: lxc/network.go:465
 #, c-format
 msgid "Network %s renamed to %s"
 msgstr ""
 
-#: lxc/init.go:143 lxc/init.go:144
+#: lxc/init.go:144 lxc/init.go:145
 msgid "Network name"
 msgstr ""
 
-#: lxc/info.go:223
+#: lxc/info.go:226
 msgid "Network usage:"
 msgstr ""
 
@@ -763,11 +784,11 @@ msgstr ""
 msgid "No certificate provided to add"
 msgstr ""
 
-#: lxc/network.go:293 lxc/network.go:346
+#: lxc/network.go:307 lxc/network.go:360
 msgid "No device found for this network"
 msgstr ""
 
-#: lxc/storage.go:421 lxc/storage.go:541
+#: lxc/storage.go:423 lxc/storage.go:553
 msgid "No device found for this storage volume."
 msgstr ""
 
@@ -775,7 +796,26 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/storage.go:365 lxc/storage.go:458
+#: lxc/cluster.go:142
+#, c-format
+msgid "Node %s removed"
+msgstr ""
+
+#: lxc/cluster.go:118
+#, c-format
+msgid "Node %s renamed to %s"
+msgstr ""
+
+#: lxc/init.go:149 lxc/network.go:102 lxc/storage.go:158
+msgid "Node name"
+msgstr ""
+
+#: lxc/info.go:119
+#, c-format
+msgid "Node: %s"
+msgstr ""
+
+#: lxc/storage.go:367 lxc/storage.go:460
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
 
@@ -787,7 +827,7 @@ msgstr ""
 msgid "Only https:// is supported for remote image import."
 msgstr ""
 
-#: lxc/network.go:402 lxc/network.go:546
+#: lxc/network.go:416 lxc/network.go:573
 msgid "Only managed networks can be modified."
 msgstr ""
 
@@ -804,19 +844,19 @@ msgstr ""
 msgid "Override the terminal mode (auto, interactive or non-interactive)"
 msgstr ""
 
-#: lxc/list.go:624
+#: lxc/list.go:640
 msgid "PERSISTENT"
 msgstr ""
 
-#: lxc/list.go:469
+#: lxc/list.go:473
 msgid "PID"
 msgstr ""
 
-#: lxc/list.go:468
+#: lxc/list.go:472
 msgid "PROCESSES"
 msgstr ""
 
-#: lxc/list.go:470
+#: lxc/list.go:474
 msgid "PROFILES"
 msgstr ""
 
@@ -828,11 +868,11 @@ msgstr ""
 msgid "PUBLIC"
 msgstr ""
 
-#: lxc/info.go:217
+#: lxc/info.go:220
 msgid "Packets received"
 msgstr ""
 
-#: lxc/info.go:218
+#: lxc/info.go:221
 msgid "Packets sent"
 msgstr ""
 
@@ -844,7 +884,7 @@ msgstr ""
 msgid "Path to an alternate server directory"
 msgstr ""
 
-#: lxc/main.go:238
+#: lxc/main.go:239
 msgid "Pause containers."
 msgstr ""
 
@@ -852,12 +892,12 @@ msgstr ""
 msgid "Permission denied, are you in the lxd group?"
 msgstr ""
 
-#: lxc/info.go:135
+#: lxc/info.go:138
 #, c-format
 msgid "Pid: %d"
 msgstr ""
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:615 lxc/storage.go:1068
+#: lxc/network.go:441 lxc/profile.go:276 lxc/storage.go:627 lxc/storage.go:1139
 msgid "Press enter to open the editor again"
 msgstr ""
 
@@ -882,7 +922,7 @@ msgstr ""
 msgid "Print verbose information"
 msgstr ""
 
-#: lxc/info.go:159
+#: lxc/info.go:162
 #, c-format
 msgid "Processes: %d"
 msgstr ""
@@ -912,7 +952,7 @@ msgstr ""
 msgid "Profile %s renamed to %s"
 msgstr ""
 
-#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:139 lxc/init.go:140
+#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:140 lxc/init.go:141
 msgid "Profile to apply to the new container"
 msgstr ""
 
@@ -921,7 +961,7 @@ msgstr ""
 msgid "Profiles %s applied to %s"
 msgstr ""
 
-#: lxc/info.go:133
+#: lxc/info.go:136
 #, c-format
 msgid "Profiles: %s"
 msgstr ""
@@ -952,11 +992,11 @@ msgstr ""
 msgid "Remote admin password"
 msgstr ""
 
-#: lxc/utils.go:366
+#: lxc/utils.go:367
 msgid "Remote operation canceled by user"
 msgstr ""
 
-#: lxc/info.go:119
+#: lxc/info.go:122
 #, c-format
 msgid "Remote: %s"
 msgstr ""
@@ -966,7 +1006,7 @@ msgstr ""
 msgid "Remove %s (yes/no): "
 msgstr ""
 
-#: lxc/storage.go:1099
+#: lxc/storage.go:1176
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr ""
@@ -975,11 +1015,11 @@ msgstr ""
 msgid "Require user confirmation"
 msgstr ""
 
-#: lxc/info.go:156
+#: lxc/info.go:159
 msgid "Resources:"
 msgstr ""
 
-#: lxc/main.go:246
+#: lxc/main.go:247
 msgid "Restart containers."
 msgstr ""
 
@@ -987,7 +1027,7 @@ msgstr ""
 msgid "Retrieve the container's console log"
 msgstr ""
 
-#: lxc/init.go:289
+#: lxc/init.go:292
 #, c-format
 msgid "Retrieving image: %s"
 msgstr ""
@@ -1000,15 +1040,15 @@ msgstr ""
 msgid "SIZE"
 msgstr ""
 
-#: lxc/list.go:471
+#: lxc/list.go:475
 msgid "SNAPSHOTS"
 msgstr ""
 
-#: lxc/storage.go:694
+#: lxc/storage.go:717
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/list.go:472
+#: lxc/cluster.go:185 lxc/list.go:476 lxc/network.go:551 lxc/storage.go:715
 msgid "STATE"
 msgstr ""
 
@@ -1020,7 +1060,7 @@ msgstr ""
 msgid "STATUS"
 msgstr ""
 
-#: lxc/list.go:474
+#: lxc/list.go:478
 msgid "STORAGE POOL"
 msgstr ""
 
@@ -1072,11 +1112,11 @@ msgstr ""
 msgid "Show the resources available to the server"
 msgstr ""
 
-#: lxc/storage.go:155
+#: lxc/storage.go:156
 msgid "Show the resources available to the storage pool"
 msgstr ""
 
-#: lxc/storage.go:156
+#: lxc/storage.go:157
 msgid "Show the used and free space in bytes"
 msgstr ""
 
@@ -1085,7 +1125,7 @@ msgstr ""
 msgid "Size: %.2fMB"
 msgstr ""
 
-#: lxc/info.go:237
+#: lxc/info.go:240
 msgid "Snapshots:"
 msgstr ""
 
@@ -1098,7 +1138,7 @@ msgstr ""
 msgid "Source:"
 msgstr ""
 
-#: lxc/main.go:256
+#: lxc/main.go:257
 msgid "Start containers."
 msgstr ""
 
@@ -1107,12 +1147,12 @@ msgstr ""
 msgid "Starting %s"
 msgstr ""
 
-#: lxc/info.go:127
+#: lxc/info.go:130
 #, c-format
 msgid "Status: %s"
 msgstr ""
 
-#: lxc/main.go:262
+#: lxc/main.go:263
 msgid "Stop containers."
 msgstr ""
 
@@ -1129,26 +1169,31 @@ msgstr ""
 msgid "Stopping the container failed: %s"
 msgstr ""
 
-#: lxc/storage.go:506
+#: lxc/storage.go:517
 #, c-format
 msgid "Storage pool %s created"
 msgstr ""
 
-#: lxc/storage.go:565
+#: lxc/storage.go:577
 #, c-format
 msgid "Storage pool %s deleted"
 msgstr ""
 
-#: lxc/init.go:145 lxc/init.go:146
+#: lxc/storage.go:515
+#, c-format
+msgid "Storage pool %s pending on node %s"
+msgstr ""
+
+#: lxc/init.go:146 lxc/init.go:147
 msgid "Storage pool name"
 msgstr ""
 
-#: lxc/storage.go:914
+#: lxc/storage.go:959
 #, c-format
 msgid "Storage volume %s created"
 msgstr ""
 
-#: lxc/storage.go:929
+#: lxc/storage.go:979
 #, c-format
 msgid "Storage volume %s deleted"
 msgstr ""
@@ -1157,15 +1202,15 @@ msgstr ""
 msgid "Store the container state (only for stop)"
 msgstr ""
 
-#: lxc/info.go:198
+#: lxc/info.go:201
 msgid "Swap (current)"
 msgstr ""
 
-#: lxc/info.go:202
+#: lxc/info.go:205
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:473 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:879
+#: lxc/list.go:477 lxc/network.go:545 lxc/operation.go:153 lxc/storage.go:914
 msgid "TYPE"
 msgstr ""
 
@@ -1179,7 +1224,7 @@ msgid ""
 "restarted."
 msgstr ""
 
-#: lxc/init.go:362
+#: lxc/init.go:365
 msgid "The container you are starting doesn't have any network attached to it."
 msgstr ""
 
@@ -1192,7 +1237,7 @@ msgstr ""
 msgid "The device doesn't exist"
 msgstr ""
 
-#: lxc/init.go:346
+#: lxc/init.go:349
 #, c-format
 msgid "The local image '%s' couldn't be found, trying '%s:' instead."
 msgstr ""
@@ -1201,11 +1246,11 @@ msgstr ""
 msgid "The opposite of \"lxc pause\" is \"lxc start\"."
 msgstr ""
 
-#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:426 lxc/storage.go:546
+#: lxc/network.go:312 lxc/network.go:365 lxc/storage.go:428 lxc/storage.go:558
 msgid "The specified device doesn't exist"
 msgstr ""
 
-#: lxc/network.go:302 lxc/network.go:355
+#: lxc/network.go:316 lxc/network.go:369
 msgid "The specified device doesn't match the network"
 msgstr ""
 
@@ -1229,11 +1274,11 @@ msgstr ""
 msgid "Timestamps:"
 msgstr ""
 
-#: lxc/init.go:364
+#: lxc/init.go:367
 msgid "To attach a network to a container, use: lxc network attach"
 msgstr ""
 
-#: lxc/init.go:363
+#: lxc/init.go:366
 msgid "To create a new network, use: lxc network create"
 msgstr ""
 
@@ -1264,11 +1309,11 @@ msgstr ""
 msgid "Try `lxc info --show-log %s` for more info"
 msgstr ""
 
-#: lxc/info.go:129
+#: lxc/info.go:132
 msgid "Type: ephemeral"
 msgstr ""
 
-#: lxc/info.go:131
+#: lxc/info.go:134
 msgid "Type: persistent"
 msgstr ""
 
@@ -1276,11 +1321,11 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/remote.go:410
+#: lxc/cluster.go:183 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:695 lxc/storage.go:882
+#: lxc/network.go:548 lxc/profile.go:574 lxc/storage.go:719 lxc/storage.go:917
 msgid "USED BY"
 msgstr ""
 
@@ -1310,6 +1355,25 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
+#: lxc/cluster.go:22
+msgid ""
+"Usage: lxc cluster <subcommand> [options]\n"
+"\n"
+"Manage cluster nodes.\n"
+"\n"
+"lxc cluster list [<remote>:]\n"
+"    List all nodes in the cluster.\n"
+"\n"
+"lxc cluster show [<remote>:]<node>\n"
+"    Show details of a node.\n"
+"\n"
+"lxc cluster rename [<remote>:]<node> <new-name>\n"
+"    Rename a cluster node.\n"
+"\n"
+"lxc cluster delete [<remote>:]<node> [--force]\n"
+"    Delete a node from the cluster."
+msgstr ""
+
 #: lxc/config.go:85
 msgid ""
 "Usage: lxc config <subcommand> [options]\n"
@@ -1626,11 +1690,12 @@ msgid ""
 "    For LXD server information."
 msgstr ""
 
-#: lxc/init.go:77
+#: lxc/init.go:78
 msgid ""
 "Usage: lxc init [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create containers from images.\n"
 "\n"
@@ -1645,7 +1710,8 @@ msgstr ""
 msgid ""
 "Usage: lxc launch [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create and start containers from images.\n"
 "\n"
@@ -1726,6 +1792,8 @@ msgid ""
 "\n"
 "\tt - Type (persistent or ephemeral)\n"
 "\n"
+"\tH - Node hosting the container\n"
+"\n"
 "Custom columns are defined with \"key[:name][:maxWidth]\":\n"
 "\n"
 "\tKEY: The (extended) config key to display\n"
@@ -1791,7 +1859,7 @@ msgid ""
 "    Rename a snapshot."
 msgstr ""
 
-#: lxc/network.go:50
+#: lxc/network.go:52
 msgid ""
 "Usage: lxc network <subcommand> [options]\n"
 "\n"
@@ -1800,13 +1868,13 @@ msgid ""
 "lxc network list [<remote>:]\n"
 "    List available networks.\n"
 "\n"
-"lxc network show [<remote>:]<network>\n"
+"lxc network show [<remote>:]<network> [--target <node>]\n"
 "    Show details of a network.\n"
 "\n"
-"lxc network create [<remote>:]<network> [key=value...]\n"
+"lxc network create [<remote>:]<network> [key=value...] [--target <node>]\n"
 "    Create a network.\n"
 "\n"
-"lxc network get [<remote>:]<network> <key>\n"
+"lxc network get [<remote>:]<network> <key> [--target <node>]\n"
 "    Get network configuration.\n"
 "\n"
 "lxc network set [<remote>:]<network> <key> <value>\n"
@@ -2039,7 +2107,7 @@ msgid ""
 "    Create a snapshot of \"u1\" called \"snap0\"."
 msgstr ""
 
-#: lxc/storage.go:65
+#: lxc/storage.go:66
 msgid ""
 "Usage: lxc storage <subcommand> [options]\n"
 "\n"
@@ -2049,13 +2117,14 @@ msgid ""
 "lxc storage list [<remote>:]\n"
 "    List available storage pools.\n"
 "\n"
-"lxc storage show [<remote>:]<pool> [--resources]\n"
+"lxc storage show [<remote>:]<pool> [--resources] [--target <node>]\n"
 "    Show details of a storage pool.\n"
 "\n"
 "lxc storage info [<remote>:]<pool> [--bytes]\n"
 "    Show information of a storage pool in yaml format.\n"
 "\n"
-"lxc storage create [<remote>:]<pool> <driver> [key=value]...\n"
+"lxc storage create [<remote>:]<pool> <driver> [key=value]... [--target "
+"<node>]\n"
 "    Create a storage pool.\n"
 "\n"
 "lxc storage get [<remote>:]<pool> <key>\n"
@@ -2078,28 +2147,31 @@ msgid ""
 "lxc storage volume list [<remote>:]<pool>\n"
 "    List available storage volumes on a storage pool.\n"
 "\n"
-"lxc storage volume show [<remote>:]<pool> <volume>\n"
-"    Show details of a storage volume on a storage pool.\n"
+"lxc storage volume show [<remote>:]<pool> <volume> [--target <node>]\n"
+"   Show details of a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume create [<remote>:]<pool> <volume> [key=value]...\n"
+"lxc storage volume create [<remote>:]<pool> <volume> [key=value]... [--"
+"target <node>]\n"
 "    Create a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume rename [<remote>:]<pool> <old name> <new name>\n"
+"lxc storage volume rename [<remote>:]<pool> <old name> <new name> [--target "
+"<node>]\n"
 "    Rename a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume get [<remote>:]<pool> <volume> <key>\n"
+"lxc storage volume get [<remote>:]<pool> <volume> <key> [--target <node>]\n"
 "    Get storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume set [<remote>:]<pool> <volume> <key> <value>\n"
+"lxc storage volume set [<remote>:]<pool> <volume> <key> <value> [--target "
+"<node>]\n"
 "    Set storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume unset [<remote>:]<pool> <volume> <key>\n"
+"lxc storage volume unset [<remote>:]<pool> <volume> <key> [--target <node>]\n"
 "    Unset storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume delete [<remote>:]<pool> <volume>\n"
+"lxc storage volume delete [<remote>:]<pool> <volume> [--target <node>]\n"
 "    Delete a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume edit [<remote>:]<pool> <volume>\n"
+"lxc storage volume edit [<remote>:]<pool> <volume> [--target <node>]\n"
 "    Edit storage volume, either by launching external editor or reading "
 "STDIN.\n"
 "\n"
@@ -2149,7 +2221,7 @@ msgstr ""
 msgid "User aborted delete operation."
 msgstr ""
 
-#: lxc/utils.go:371
+#: lxc/utils.go:372
 msgid ""
 "User signaled us three times, exiting. The remote operation will keep "
 "running."
@@ -2169,7 +2241,7 @@ msgstr ""
 msgid "Whether or not to snapshot the container's running state"
 msgstr ""
 
-#: lxc/network.go:510 lxc/operation.go:141 lxc/remote.go:382 lxc/remote.go:387
+#: lxc/network.go:528 lxc/operation.go:141 lxc/remote.go:382 lxc/remote.go:387
 msgid "YES"
 msgstr ""
 
@@ -2201,11 +2273,11 @@ msgstr ""
 msgid "default"
 msgstr ""
 
-#: lxc/storage.go:804
+#: lxc/storage.go:835
 msgid "description"
 msgstr ""
 
-#: lxc/init.go:311
+#: lxc/init.go:314
 msgid "didn't get any affected image, container or snapshot from server"
 msgstr ""
 
@@ -2213,7 +2285,7 @@ msgstr ""
 msgid "disabled"
 msgstr ""
 
-#: lxc/storage.go:803
+#: lxc/storage.go:834
 msgid "driver"
 msgstr ""
 
@@ -2231,11 +2303,11 @@ msgstr ""
 msgid "error: unknown command: %s"
 msgstr ""
 
-#: lxc/storage.go:801
+#: lxc/storage.go:832
 msgid "info"
 msgstr ""
 
-#: lxc/storage.go:802
+#: lxc/storage.go:833
 msgid "name"
 msgstr ""
 
@@ -2247,7 +2319,7 @@ msgstr ""
 msgid "ok (y/n)?"
 msgstr ""
 
-#: lxc/main.go:366 lxc/main.go:370
+#: lxc/main.go:367 lxc/main.go:371
 #, c-format
 msgid "processing aliases failed %s\n"
 msgstr ""
@@ -2276,32 +2348,32 @@ msgstr ""
 msgid "remote %s is static and cannot be modified"
 msgstr ""
 
-#: lxc/storage.go:806
+#: lxc/storage.go:837
 msgid "space used"
 msgstr ""
 
-#: lxc/info.go:248
+#: lxc/info.go:251
 msgid "stateful"
 msgstr ""
 
-#: lxc/info.go:250
+#: lxc/info.go:253
 msgid "stateless"
 msgstr ""
 
-#: lxc/info.go:244
+#: lxc/info.go:247
 #, c-format
 msgid "taken at %s"
 msgstr ""
 
-#: lxc/storage.go:805
+#: lxc/storage.go:836
 msgid "total space"
 msgstr ""
 
-#: lxc/storage.go:800
+#: lxc/storage.go:831
 msgid "used by"
 msgstr ""
 
-#: lxc/main.go:297
+#: lxc/main.go:298
 msgid "wrong number of subcommand arguments"
 msgstr ""
 
diff --git a/po/fr.po b/po/fr.po
index 3fd9c092f..cd8dea6ea 100644
--- a/po/fr.po
+++ b/po/fr.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: LXD\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2018-01-21 21:48+0000\n"
+"POT-Creation-Date: 2018-01-24 13:34+0000\n"
 "PO-Revision-Date: 2018-01-02 10:52+0000\n"
 "Last-Translator: Bruno Perel <brunoperel at gmail.com>\n"
 "Language-Team: French <https://hosted.weblate.org/projects/linux-containers/"
@@ -19,7 +19,7 @@ msgstr ""
 "Plural-Forms: nplurals=2; plural=n > 1;\n"
 "X-Generator: Weblate 2.19-dev\n"
 
-#: lxc/storage.go:34
+#: lxc/storage.go:35
 msgid ""
 "### This is a yaml representation of a storage pool.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -50,7 +50,7 @@ msgstr ""
 "###   source: /home/chb/mnt/lxd_test/default.img\n"
 "###   zfs.pool_name: default"
 
-#: lxc/storage.go:51
+#: lxc/storage.go:52
 #, fuzzy
 msgid ""
 "### This is a yaml representation of a storage volume.\n"
@@ -167,7 +167,7 @@ msgstr ""
 "### Un exemple serait :\n"
 "###  description: Mon image personnalisée"
 
-#: lxc/network.go:30
+#: lxc/network.go:32
 msgid ""
 "### This is a yaml representation of the network.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -251,7 +251,7 @@ msgstr "%s (%d de plus)"
 msgid "%s is not a directory"
 msgstr "%s n'est pas un répertoire"
 
-#: lxc/utils.go:375
+#: lxc/utils.go:376
 #, c-format
 msgid "%v (interrupt two more times to force)"
 msgstr "%v (interrompre encore deux fois pour forcer)"
@@ -278,7 +278,7 @@ msgstr "ALIAS"
 msgid "ARCH"
 msgstr "ARCH"
 
-#: lxc/list.go:463
+#: lxc/list.go:467
 msgid "ARCHITECTURE"
 msgstr "ARCHITECTURE"
 
@@ -304,7 +304,7 @@ msgstr "Mot de passe administrateur pour %s : "
 msgid "Aliases:"
 msgstr "Alias :"
 
-#: lxc/image.go:600 lxc/info.go:122
+#: lxc/image.go:600 lxc/info.go:125
 #, c-format
 msgid "Architecture: %s"
 msgstr "Architecture : %s"
@@ -328,11 +328,11 @@ msgstr ""
 msgid "Both --all and container name given"
 msgstr ""
 
-#: lxc/info.go:215
+#: lxc/info.go:218
 msgid "Bytes received"
 msgstr "Octets reçus"
 
-#: lxc/info.go:216
+#: lxc/info.go:219
 msgid "Bytes sent"
 msgstr "Octets émis"
 
@@ -344,11 +344,11 @@ msgstr ""
 msgid "COMMON NAME"
 msgstr "COMMON NAME"
 
-#: lxc/info.go:179
+#: lxc/info.go:182
 msgid "CPU usage (in seconds)"
 msgstr "CPU utilisé (en secondes)"
 
-#: lxc/info.go:183
+#: lxc/info.go:186
 msgid "CPU usage:"
 msgstr "CPU utilisé :"
 
@@ -357,7 +357,7 @@ msgstr "CPU utilisé :"
 msgid "CREATED"
 msgstr "CRÉÉ À"
 
-#: lxc/list.go:464
+#: lxc/list.go:468
 msgid "CREATED AT"
 msgstr "CRÉÉ À"
 
@@ -371,7 +371,7 @@ msgstr "Créé : %s"
 msgid "Can't pull a directory without --recursive"
 msgstr "impossible de récupérer un répertoire sans --recursive"
 
-#: lxc/config.go:207 lxc/network.go:560
+#: lxc/config.go:207 lxc/network.go:587
 #, c-format
 msgid "Can't read from stdin: %s"
 msgstr "Impossible de lire depuis stdin : %s"
@@ -392,7 +392,7 @@ msgid "Can't unset key '%s', it's not currently set."
 msgstr ""
 "Impossible de désaffecter la clé '%s', elle n'est pas définie actuellement."
 
-#: lxc/profile.go:546 lxc/storage.go:663
+#: lxc/profile.go:546 lxc/storage.go:675
 msgid "Cannot provide container name to list"
 msgstr "Impossible de fournir le nom du conteneur à lister"
 
@@ -405,7 +405,7 @@ msgstr "Empreinte du certificat : %x"
 msgid "Client certificate stored at server: "
 msgstr "Certificat client enregistré sur le serveur : "
 
-#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:130 lxc/list.go:131
+#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:134 lxc/list.go:135
 msgid "Columns"
 msgstr "Colonnes"
 
@@ -413,12 +413,12 @@ msgstr "Colonnes"
 msgid "Commands:"
 msgstr "Commandes :"
 
-#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:137 lxc/init.go:138
+#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:138 lxc/init.go:139
 msgid "Config key/value to apply to the new container"
 msgstr "Clé/valeur de configuration à appliquer au nouveau conteneur"
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:614 lxc/storage.go:1067
+#: lxc/network.go:440 lxc/profile.go:275 lxc/storage.go:626 lxc/storage.go:1138
 #, c-format
 msgid "Config parsing error: %s"
 msgstr "Erreur lors de la lecture de la configuration : %s"
@@ -435,7 +435,7 @@ msgstr ""
 msgid "Container name is mandatory"
 msgstr "Le nom du conteneur est obligatoire"
 
-#: lxc/copy.go:229 lxc/init.go:317
+#: lxc/copy.go:229 lxc/init.go:320
 #, c-format
 msgid "Container name is: %s"
 msgstr "Le nom du conteneur est : %s"
@@ -471,26 +471,30 @@ msgstr "Impossible de créer le dossier de stockage des certificats serveurs"
 msgid "Create any directories necessary"
 msgstr "Créer tous répertoires nécessaires"
 
-#: lxc/image.go:605 lxc/info.go:124
+#: lxc/image.go:605 lxc/info.go:127
 #, c-format
 msgid "Created: %s"
 msgstr "Créé : %s"
 
-#: lxc/init.go:196
+#: lxc/init.go:199
 #, c-format
 msgid "Creating %s"
 msgstr "Création de %s"
 
-#: lxc/init.go:194
+#: lxc/init.go:197
 msgid "Creating the container"
 msgstr "Création du conteneur"
 
-#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:465 lxc/network.go:525
-#: lxc/storage.go:692 lxc/storage.go:881
+#: lxc/cluster.go:184
+msgid "DATABASE"
+msgstr ""
+
+#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:469 lxc/network.go:547
+#: lxc/storage.go:711 lxc/storage.go:916
 msgid "DESCRIPTION"
 msgstr "DESCRIPTION"
 
-#: lxc/storage.go:693
+#: lxc/storage.go:712
 msgid "DRIVER"
 msgstr "PILOTE"
 
@@ -508,7 +512,7 @@ msgstr "Périphérique %s ajouté à %s"
 msgid "Device %s removed from %s"
 msgstr "Périphérique %s retiré de %s"
 
-#: lxc/utils.go:307 lxc/utils.go:331
+#: lxc/utils.go:308 lxc/utils.go:332
 #, fuzzy, c-format
 msgid "Device already exists: %s"
 msgstr "le serveur distant %s existe déjà"
@@ -525,12 +529,12 @@ msgstr "Désactiver l'allocation pseudo-terminal"
 msgid "Disable stdin (reads from /dev/null)"
 msgstr "Désactiver stdin (lecture à partir de /dev/null)"
 
-#: lxc/info.go:172
+#: lxc/info.go:175
 #, fuzzy
 msgid "Disk usage:"
 msgstr "  Disque utilisé :"
 
-#: lxc/list.go:622
+#: lxc/list.go:638
 msgid "EPHEMERAL"
 msgstr "ÉPHÉMÈRE"
 
@@ -554,7 +558,7 @@ msgstr "Variable d'environnement (de la forme HOME=/home/foo) à positionner"
 msgid "Environment:"
 msgstr "Environnement :"
 
-#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:141 lxc/init.go:142
+#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:142 lxc/init.go:143
 msgid "Ephemeral container"
 msgstr "Conteneur éphémère"
 
@@ -590,7 +594,7 @@ msgstr "NOM"
 msgid "FINGERPRINT"
 msgstr "EMPREINTE"
 
-#: lxc/utils.go:413
+#: lxc/utils.go:414
 #, fuzzy, c-format
 msgid "Failed to create alias %s"
 msgstr "Échec lors de la génération de 'lxc.%s.1': %v"
@@ -610,7 +614,7 @@ msgstr "Échec lors de la génération de 'lxc.1': %v"
 msgid "Failed to get the new container name"
 msgstr "Profil à appliquer au nouveau conteneur"
 
-#: lxc/utils.go:403
+#: lxc/utils.go:404
 #, c-format
 msgid "Failed to remove alias %s"
 msgstr ""
@@ -620,12 +624,12 @@ msgstr ""
 msgid "Failed to walk path for %s: %s"
 msgstr ""
 
-#: lxc/list.go:133
+#: lxc/list.go:137
 #, fuzzy
 msgid "Fast mode (same as --columns=nsacPt)"
 msgstr "Mode rapide (identique à --columns=nsacPt"
 
-#: lxc/network.go:486 lxc/operation.go:121
+#: lxc/network.go:504 lxc/operation.go:121
 msgid "Filtering isn't supported yet"
 msgstr ""
 
@@ -638,6 +642,10 @@ msgstr "Empreinte : %s"
 msgid "Force pseudo-terminal allocation"
 msgstr "Forcer l'allocation de pseudo-terminal "
 
+#: lxc/cluster.go:41
+msgid "Force removing a node, even if degraded"
+msgstr ""
+
 #: lxc/action.go:47 lxc/action.go:48
 msgid "Force the container to shutdown"
 msgstr "Forcer le conteneur à s'arrêter"
@@ -651,7 +659,7 @@ msgstr "Forcer la suppression des conteneurs arrêtés"
 msgid "Force using the local unix socket"
 msgstr "Forcer l'utilisation de la socket unix locale"
 
-#: lxc/image.go:180 lxc/list.go:132
+#: lxc/image.go:180 lxc/list.go:136
 msgid "Format (csv|json|table|yaml)"
 msgstr ""
 
@@ -664,11 +672,11 @@ msgstr "Génération d'un certificat client. Ceci peut prendre une minute…"
 msgid "ID"
 msgstr "PID"
 
-#: lxc/list.go:461
+#: lxc/list.go:465
 msgid "IPV4"
 msgstr "IPv4"
 
-#: lxc/list.go:462
+#: lxc/list.go:466
 msgid "IPV6"
 msgstr "IPv6"
 
@@ -720,7 +728,7 @@ msgstr "Image copiée avec succès !"
 msgid "Input data"
 msgstr ""
 
-#: lxc/init.go:147
+#: lxc/init.go:148
 msgid "Instance type"
 msgstr ""
 
@@ -757,7 +765,7 @@ msgstr "Source invalide %s"
 msgid "Invalid target %s"
 msgstr "Cible invalide %s"
 
-#: lxc/info.go:153
+#: lxc/info.go:156
 msgid "Ips:"
 msgstr "IPs :"
 
@@ -765,7 +773,7 @@ msgstr "IPs :"
 msgid "Keep the image up to date after initial copy"
 msgstr "Garder l'image à jour après la copie initiale"
 
-#: lxc/list.go:466
+#: lxc/list.go:470
 msgid "LAST USED AT"
 msgstr "DERNIÈRE UTILISATION À"
 
@@ -782,14 +790,18 @@ msgstr "Dernière utilisation : %s"
 msgid "Last used: never"
 msgstr "Dernière utilisation : jamais"
 
-#: lxc/info.go:268
+#: lxc/info.go:271
 msgid "Log:"
 msgstr "Journal : "
 
-#: lxc/network.go:524
+#: lxc/network.go:546
 msgid "MANAGED"
 msgstr "GÉRÉ"
 
+#: lxc/cluster.go:186
+msgid "MESSAGE"
+msgstr ""
+
 #: lxc/image.go:176
 msgid "Make image public"
 msgstr "Rendre l'image publique"
@@ -798,24 +810,24 @@ msgstr "Rendre l'image publique"
 msgid "Make the image public"
 msgstr "Rendre l'image publique"
 
-#: lxc/info.go:190
+#: lxc/info.go:193
 msgid "Memory (current)"
 msgstr "Mémoire (courante)"
 
-#: lxc/info.go:194
+#: lxc/info.go:197
 msgid "Memory (peak)"
 msgstr "Mémoire (pointe)"
 
-#: lxc/info.go:206
+#: lxc/info.go:209
 #, fuzzy
 msgid "Memory usage:"
 msgstr "  Mémoire utilisée :"
 
-#: lxc/utils.go:258
+#: lxc/utils.go:259
 msgid "Missing summary."
 msgstr "Résumé manquant."
 
-#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:412 lxc/storage.go:532
+#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:414 lxc/storage.go:544
 msgid "More than one device matches, specify the device name."
 msgstr "Plus d'un périphérique correspond, spécifier le nom du périphérique."
 
@@ -837,40 +849,49 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr "Vous devez fournir le nom d'un conteneur pour : "
 
-#: lxc/list.go:467 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409
-#: lxc/storage.go:691 lxc/storage.go:880
+#: lxc/cluster.go:182 lxc/list.go:471 lxc/network.go:544 lxc/profile.go:573
+#: lxc/remote.go:409 lxc/storage.go:710 lxc/storage.go:915
 msgid "NAME"
 msgstr "NOM"
 
-#: lxc/network.go:508 lxc/operation.go:139 lxc/remote.go:380 lxc/remote.go:385
+#: lxc/network.go:526 lxc/operation.go:139 lxc/remote.go:380 lxc/remote.go:385
 msgid "NO"
 msgstr "NON"
 
+#: lxc/list.go:492 lxc/storage.go:920
+msgid "NODE"
+msgstr ""
+
 #: lxc/info.go:117
 #, c-format
 msgid "Name: %s"
 msgstr "Nom : %s"
 
-#: lxc/network.go:258
+#: lxc/network.go:271
 #, c-format
 msgid "Network %s created"
 msgstr "Le réseau %s a été créé"
 
-#: lxc/network.go:374
+#: lxc/network.go:388
 #, c-format
 msgid "Network %s deleted"
 msgstr "Le réseau %s a été supprimé"
 
-#: lxc/network.go:451
+#: lxc/network.go:269
+#, fuzzy, c-format
+msgid "Network %s pending on node %s"
+msgstr "Le réseau %s a été créé"
+
+#: lxc/network.go:465
 #, fuzzy, c-format
 msgid "Network %s renamed to %s"
 msgstr "Le réseau %s a été créé"
 
-#: lxc/init.go:143 lxc/init.go:144
+#: lxc/init.go:144 lxc/init.go:145
 msgid "Network name"
 msgstr "Nom du réseau"
 
-#: lxc/info.go:223
+#: lxc/info.go:226
 #, fuzzy
 msgid "Network usage:"
 msgstr "  Réseau utilisé :"
@@ -883,11 +904,11 @@ msgstr "Nouvel alias à définir sur la cible"
 msgid "No certificate provided to add"
 msgstr "Un certificat à ajouter n'a pas été fourni"
 
-#: lxc/network.go:293 lxc/network.go:346
+#: lxc/network.go:307 lxc/network.go:360
 msgid "No device found for this network"
 msgstr "Aucun périphérique existant pour ce réseau"
 
-#: lxc/storage.go:421 lxc/storage.go:541
+#: lxc/storage.go:423 lxc/storage.go:553
 #, fuzzy
 msgid "No device found for this storage volume."
 msgstr "Aucun périphérique existant pour ce réseau"
@@ -896,7 +917,27 @@ msgstr "Aucun périphérique existant pour ce réseau"
 msgid "No fingerprint specified."
 msgstr "Aucune empreinte n'a été indiquée."
 
-#: lxc/storage.go:365 lxc/storage.go:458
+#: lxc/cluster.go:142
+#, fuzzy, c-format
+msgid "Node %s removed"
+msgstr "Profil %s supprimé de %s"
+
+#: lxc/cluster.go:118
+#, fuzzy, c-format
+msgid "Node %s renamed to %s"
+msgstr "Profil %s ajouté à %s"
+
+#: lxc/init.go:149 lxc/network.go:102 lxc/storage.go:158
+#, fuzzy
+msgid "Node name"
+msgstr "Nom du réseau"
+
+#: lxc/info.go:119
+#, fuzzy, c-format
+msgid "Node: %s"
+msgstr "Nom : %s"
+
+#: lxc/storage.go:367 lxc/storage.go:460
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
 "Seuls les volumes \"personnalisés\" peuvent être attachés aux conteneurs."
@@ -909,7 +950,7 @@ msgstr "Seules les URLs https sont supportées par simplestreams"
 msgid "Only https:// is supported for remote image import."
 msgstr "Seul https:// est supporté par l'import d'images distantes."
 
-#: lxc/network.go:402 lxc/network.go:546
+#: lxc/network.go:416 lxc/network.go:573
 msgid "Only managed networks can be modified."
 msgstr "Seuls les réseaux gérés par LXD peuvent être modifiés."
 
@@ -926,19 +967,19 @@ msgstr "Options :"
 msgid "Override the terminal mode (auto, interactive or non-interactive)"
 msgstr "Surcharger le mode terminal (auto, interactif ou non-interactif)"
 
-#: lxc/list.go:624
+#: lxc/list.go:640
 msgid "PERSISTENT"
 msgstr "PERSISTANT"
 
-#: lxc/list.go:469
+#: lxc/list.go:473
 msgid "PID"
 msgstr "PID"
 
-#: lxc/list.go:468
+#: lxc/list.go:472
 msgid "PROCESSES"
 msgstr ""
 
-#: lxc/list.go:470
+#: lxc/list.go:474
 msgid "PROFILES"
 msgstr "PROFILS"
 
@@ -950,11 +991,11 @@ msgstr "PROTOCOLE"
 msgid "PUBLIC"
 msgstr "PUBLIC"
 
-#: lxc/info.go:217
+#: lxc/info.go:220
 msgid "Packets received"
 msgstr "Paquets reçus"
 
-#: lxc/info.go:218
+#: lxc/info.go:221
 msgid "Packets sent"
 msgstr "Paquets émis"
 
@@ -966,7 +1007,7 @@ msgstr "Chemin vers un dossier de configuration client alternatif"
 msgid "Path to an alternate server directory"
 msgstr "Chemin vers un dossier de configuration serveur alternatif"
 
-#: lxc/main.go:238
+#: lxc/main.go:239
 #, fuzzy
 msgid "Pause containers."
 msgstr "Création du conteneur"
@@ -975,12 +1016,12 @@ msgstr "Création du conteneur"
 msgid "Permission denied, are you in the lxd group?"
 msgstr "Permission refusée, êtes-vous dans le groupe lxd ?"
 
-#: lxc/info.go:135
+#: lxc/info.go:138
 #, c-format
 msgid "Pid: %d"
 msgstr "Pid : %d"
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:615 lxc/storage.go:1068
+#: lxc/network.go:441 lxc/profile.go:276 lxc/storage.go:627 lxc/storage.go:1139
 msgid "Press enter to open the editor again"
 msgstr "Appuyer sur Entrée pour ouvrir à nouveau l'éditeur"
 
@@ -1005,7 +1046,7 @@ msgstr ""
 msgid "Print verbose information"
 msgstr "Afficher des informations supplémentaires"
 
-#: lxc/info.go:159
+#: lxc/info.go:162
 #, c-format
 msgid "Processes: %d"
 msgstr "Processus : %d"
@@ -1035,7 +1076,7 @@ msgstr "Profil %s supprimé de %s"
 msgid "Profile %s renamed to %s"
 msgstr "Profil %s ajouté à %s"
 
-#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:139 lxc/init.go:140
+#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:140 lxc/init.go:141
 msgid "Profile to apply to the new container"
 msgstr "Profil à appliquer au nouveau conteneur"
 
@@ -1044,7 +1085,7 @@ msgstr "Profil à appliquer au nouveau conteneur"
 msgid "Profiles %s applied to %s"
 msgstr "Profils %s appliqués à %s"
 
-#: lxc/info.go:133
+#: lxc/info.go:136
 #, c-format
 msgid "Profiles: %s"
 msgstr "Profils : %s"
@@ -1075,12 +1116,12 @@ msgstr "Récupération de l'image : %s"
 msgid "Remote admin password"
 msgstr "Mot de passe de l'administrateur distant"
 
-#: lxc/utils.go:366
+#: lxc/utils.go:367
 #, fuzzy
 msgid "Remote operation canceled by user"
 msgstr "Certificat serveur rejeté par l'utilisateur"
 
-#: lxc/info.go:119
+#: lxc/info.go:122
 #, c-format
 msgid "Remote: %s"
 msgstr "Serveur distant : %s"
@@ -1090,7 +1131,7 @@ msgstr "Serveur distant : %s"
 msgid "Remove %s (yes/no): "
 msgstr "Supprimer %s (oui/non) : "
 
-#: lxc/storage.go:1099
+#: lxc/storage.go:1176
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr ""
@@ -1099,11 +1140,11 @@ msgstr ""
 msgid "Require user confirmation"
 msgstr "Requérir une confirmation de l'utilisateur"
 
-#: lxc/info.go:156
+#: lxc/info.go:159
 msgid "Resources:"
 msgstr "Ressources :"
 
-#: lxc/main.go:246
+#: lxc/main.go:247
 #, fuzzy
 msgid "Restart containers."
 msgstr "Création du conteneur"
@@ -1113,7 +1154,7 @@ msgstr "Création du conteneur"
 msgid "Retrieve the container's console log"
 msgstr "Forcer l'arrêt du conteneur (seulement pour stop)"
 
-#: lxc/init.go:289
+#: lxc/init.go:292
 #, c-format
 msgid "Retrieving image: %s"
 msgstr "Récupération de l'image : %s"
@@ -1126,15 +1167,15 @@ msgstr ""
 msgid "SIZE"
 msgstr "TAILLE"
 
-#: lxc/list.go:471
+#: lxc/list.go:475
 msgid "SNAPSHOTS"
 msgstr "INSTANTANÉS"
 
-#: lxc/storage.go:694
+#: lxc/storage.go:717
 msgid "SOURCE"
 msgstr "SOURCE"
 
-#: lxc/list.go:472
+#: lxc/cluster.go:185 lxc/list.go:476 lxc/network.go:551 lxc/storage.go:715
 msgid "STATE"
 msgstr "ÉTAT"
 
@@ -1147,7 +1188,7 @@ msgstr "STATIQUE"
 msgid "STATUS"
 msgstr "ÉTAT"
 
-#: lxc/list.go:474
+#: lxc/list.go:478
 msgid "STORAGE POOL"
 msgstr "ENSEMBLE DE STOCKAGE"
 
@@ -1201,11 +1242,11 @@ msgstr "Afficher la configuration étendue"
 msgid "Show the resources available to the server"
 msgstr ""
 
-#: lxc/storage.go:155
+#: lxc/storage.go:156
 msgid "Show the resources available to the storage pool"
 msgstr ""
 
-#: lxc/storage.go:156
+#: lxc/storage.go:157
 msgid "Show the used and free space in bytes"
 msgstr ""
 
@@ -1214,7 +1255,7 @@ msgstr ""
 msgid "Size: %.2fMB"
 msgstr "Taille : %.2f Mo"
 
-#: lxc/info.go:237
+#: lxc/info.go:240
 msgid "Snapshots:"
 msgstr "Instantanés :"
 
@@ -1227,7 +1268,7 @@ msgstr "L'arrêt du conteneur a échoué !"
 msgid "Source:"
 msgstr "Source :"
 
-#: lxc/main.go:256
+#: lxc/main.go:257
 #, fuzzy
 msgid "Start containers."
 msgstr "Création du conteneur"
@@ -1237,12 +1278,12 @@ msgstr "Création du conteneur"
 msgid "Starting %s"
 msgstr "Démarrage de %s"
 
-#: lxc/info.go:127
+#: lxc/info.go:130
 #, c-format
 msgid "Status: %s"
 msgstr "État : %s"
 
-#: lxc/main.go:262
+#: lxc/main.go:263
 #, fuzzy
 msgid "Stop containers."
 msgstr "L'arrêt du conteneur a échoué !"
@@ -1260,26 +1301,31 @@ msgstr "L'arrêt du conteneur a échoué !"
 msgid "Stopping the container failed: %s"
 msgstr "L'arrêt du conteneur a échoué !"
 
-#: lxc/storage.go:506
+#: lxc/storage.go:517
 #, fuzzy, c-format
 msgid "Storage pool %s created"
 msgstr "Le réseau %s a été créé"
 
-#: lxc/storage.go:565
+#: lxc/storage.go:577
 #, fuzzy, c-format
 msgid "Storage pool %s deleted"
 msgstr "Le réseau %s a été supprimé"
 
-#: lxc/init.go:145 lxc/init.go:146
+#: lxc/storage.go:515
+#, fuzzy, c-format
+msgid "Storage pool %s pending on node %s"
+msgstr "Le réseau %s a été créé"
+
+#: lxc/init.go:146 lxc/init.go:147
 msgid "Storage pool name"
 msgstr "Nom de l'ensemble de stockage"
 
-#: lxc/storage.go:914
+#: lxc/storage.go:959
 #, fuzzy, c-format
 msgid "Storage volume %s created"
 msgstr "Profil %s créé"
 
-#: lxc/storage.go:929
+#: lxc/storage.go:979
 #, fuzzy, c-format
 msgid "Storage volume %s deleted"
 msgstr "Profil %s supprimé"
@@ -1288,15 +1334,15 @@ msgstr "Profil %s supprimé"
 msgid "Store the container state (only for stop)"
 msgstr "Forcer l'arrêt du conteneur (seulement pour stop)"
 
-#: lxc/info.go:198
+#: lxc/info.go:201
 msgid "Swap (current)"
 msgstr "Swap (courant)"
 
-#: lxc/info.go:202
+#: lxc/info.go:205
 msgid "Swap (peak)"
 msgstr "Swap (pointe)"
 
-#: lxc/list.go:473 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:879
+#: lxc/list.go:477 lxc/network.go:545 lxc/operation.go:153 lxc/storage.go:914
 msgid "TYPE"
 msgstr "TYPE"
 
@@ -1313,7 +1359,7 @@ msgstr ""
 "Le conteneur est en cours d'exécution. Utiliser --force pour qu'il soit "
 "arrêté et redémarré."
 
-#: lxc/init.go:362
+#: lxc/init.go:365
 msgid "The container you are starting doesn't have any network attached to it."
 msgstr ""
 "Le conteneur que vous démarrez n'est attaché à aucune interface réseau."
@@ -1328,7 +1374,7 @@ msgstr "Le périphérique n'existe pas"
 msgid "The device doesn't exist"
 msgstr "Le périphérique n'existe pas"
 
-#: lxc/init.go:346
+#: lxc/init.go:349
 #, c-format
 msgid "The local image '%s' couldn't be found, trying '%s:' instead."
 msgstr "L'image locale '%s' n'a pas été trouvée, essayer '%s:' à la place."
@@ -1338,11 +1384,11 @@ msgstr "L'image locale '%s' n'a pas été trouvée, essayer '%s:' à la place."
 msgid "The opposite of \"lxc pause\" is \"lxc start\"."
 msgstr "Le pendant de `lxc pause` est `lxc start`."
 
-#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:426 lxc/storage.go:546
+#: lxc/network.go:312 lxc/network.go:365 lxc/storage.go:428 lxc/storage.go:558
 msgid "The specified device doesn't exist"
 msgstr "Le périphérique indiqué n'existe pas"
 
-#: lxc/network.go:302 lxc/network.go:355
+#: lxc/network.go:316 lxc/network.go:369
 msgid "The specified device doesn't match the network"
 msgstr "le périphérique indiqué ne correspond pas au réseau"
 
@@ -1371,11 +1417,11 @@ msgstr "Temps d'attente du conteneur avant de le tuer"
 msgid "Timestamps:"
 msgstr "Horodatage :"
 
-#: lxc/init.go:364
+#: lxc/init.go:367
 msgid "To attach a network to a container, use: lxc network attach"
 msgstr "Pour attacher un réseau à un conteneur, utiliser : lxc network attach"
 
-#: lxc/init.go:363
+#: lxc/init.go:366
 msgid "To create a new network, use: lxc network create"
 msgstr "Pour créer un réseau, utiliser : lxc network create"
 
@@ -1407,11 +1453,11 @@ msgstr "Transfert de l'image : %s"
 msgid "Try `lxc info --show-log %s` for more info"
 msgstr "Essayer `lxc info --show-log %s` pour plus d'informations"
 
-#: lxc/info.go:129
+#: lxc/info.go:132
 msgid "Type: ephemeral"
 msgstr "Type : éphémère"
 
-#: lxc/info.go:131
+#: lxc/info.go:134
 msgid "Type: persistent"
 msgstr "Type : persistant"
 
@@ -1419,11 +1465,11 @@ msgstr "Type : persistant"
 msgid "UPLOAD DATE"
 msgstr "DATE DE PUBLICATION"
 
-#: lxc/remote.go:410
+#: lxc/cluster.go:183 lxc/remote.go:410
 msgid "URL"
 msgstr "URL"
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:695 lxc/storage.go:882
+#: lxc/network.go:548 lxc/profile.go:574 lxc/storage.go:719 lxc/storage.go:917
 msgid "USED BY"
 msgstr "UTILISÉ PAR"
 
@@ -1456,6 +1502,25 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr "Utilisation : lxc <commande> [options]"
 
+#: lxc/cluster.go:22
+msgid ""
+"Usage: lxc cluster <subcommand> [options]\n"
+"\n"
+"Manage cluster nodes.\n"
+"\n"
+"lxc cluster list [<remote>:]\n"
+"    List all nodes in the cluster.\n"
+"\n"
+"lxc cluster show [<remote>:]<node>\n"
+"    Show details of a node.\n"
+"\n"
+"lxc cluster rename [<remote>:]<node> <new-name>\n"
+"    Rename a cluster node.\n"
+"\n"
+"lxc cluster delete [<remote>:]<node> [--force]\n"
+"    Delete a node from the cluster."
+msgstr ""
+
 #: lxc/config.go:85
 #, fuzzy
 msgid ""
@@ -1963,12 +2028,13 @@ msgstr ""
 "lxc info [<serveur distant>:]\n"
 "    Pour l'information du serveur LXD."
 
-#: lxc/init.go:77
+#: lxc/init.go:78
 #, fuzzy
 msgid ""
 "Usage: lxc init [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create containers from images.\n"
 "\n"
@@ -1996,7 +2062,8 @@ msgstr ""
 msgid ""
 "Usage: lxc launch [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create and start containers from images.\n"
 "\n"
@@ -2091,6 +2158,8 @@ msgid ""
 "\n"
 "\tt - Type (persistent or ephemeral)\n"
 "\n"
+"\tH - Node hosting the container\n"
+"\n"
 "Custom columns are defined with \"key[:name][:maxWidth]\":\n"
 "\n"
 "\tKEY: The (extended) config key to display\n"
@@ -2242,7 +2311,7 @@ msgstr ""
 "lxc move <container>/<old snapshot name> <container>/<new snapshot name>\n"
 "    Renomme un instantané."
 
-#: lxc/network.go:50
+#: lxc/network.go:52
 #, fuzzy
 msgid ""
 "Usage: lxc network <subcommand> [options]\n"
@@ -2252,13 +2321,13 @@ msgid ""
 "lxc network list [<remote>:]\n"
 "    List available networks.\n"
 "\n"
-"lxc network show [<remote>:]<network>\n"
+"lxc network show [<remote>:]<network> [--target <node>]\n"
 "    Show details of a network.\n"
 "\n"
-"lxc network create [<remote>:]<network> [key=value...]\n"
+"lxc network create [<remote>:]<network> [key=value...] [--target <node>]\n"
 "    Create a network.\n"
 "\n"
-"lxc network get [<remote>:]<network> <key>\n"
+"lxc network get [<remote>:]<network> <key> [--target <node>]\n"
 "    Get network configuration.\n"
 "\n"
 "lxc network set [<remote>:]<network> <key> <value>\n"
@@ -2639,7 +2708,7 @@ msgstr ""
 "Exemple :\n"
 "    lxc snapshot u1 snap0"
 
-#: lxc/storage.go:65
+#: lxc/storage.go:66
 msgid ""
 "Usage: lxc storage <subcommand> [options]\n"
 "\n"
@@ -2649,13 +2718,14 @@ msgid ""
 "lxc storage list [<remote>:]\n"
 "    List available storage pools.\n"
 "\n"
-"lxc storage show [<remote>:]<pool> [--resources]\n"
+"lxc storage show [<remote>:]<pool> [--resources] [--target <node>]\n"
 "    Show details of a storage pool.\n"
 "\n"
 "lxc storage info [<remote>:]<pool> [--bytes]\n"
 "    Show information of a storage pool in yaml format.\n"
 "\n"
-"lxc storage create [<remote>:]<pool> <driver> [key=value]...\n"
+"lxc storage create [<remote>:]<pool> <driver> [key=value]... [--target "
+"<node>]\n"
 "    Create a storage pool.\n"
 "\n"
 "lxc storage get [<remote>:]<pool> <key>\n"
@@ -2678,28 +2748,31 @@ msgid ""
 "lxc storage volume list [<remote>:]<pool>\n"
 "    List available storage volumes on a storage pool.\n"
 "\n"
-"lxc storage volume show [<remote>:]<pool> <volume>\n"
-"    Show details of a storage volume on a storage pool.\n"
+"lxc storage volume show [<remote>:]<pool> <volume> [--target <node>]\n"
+"   Show details of a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume create [<remote>:]<pool> <volume> [key=value]...\n"
+"lxc storage volume create [<remote>:]<pool> <volume> [key=value]... [--"
+"target <node>]\n"
 "    Create a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume rename [<remote>:]<pool> <old name> <new name>\n"
+"lxc storage volume rename [<remote>:]<pool> <old name> <new name> [--target "
+"<node>]\n"
 "    Rename a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume get [<remote>:]<pool> <volume> <key>\n"
+"lxc storage volume get [<remote>:]<pool> <volume> <key> [--target <node>]\n"
 "    Get storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume set [<remote>:]<pool> <volume> <key> <value>\n"
+"lxc storage volume set [<remote>:]<pool> <volume> <key> <value> [--target "
+"<node>]\n"
 "    Set storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume unset [<remote>:]<pool> <volume> <key>\n"
+"lxc storage volume unset [<remote>:]<pool> <volume> <key> [--target <node>]\n"
 "    Unset storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume delete [<remote>:]<pool> <volume>\n"
+"lxc storage volume delete [<remote>:]<pool> <volume> [--target <node>]\n"
 "    Delete a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume edit [<remote>:]<pool> <volume>\n"
+"lxc storage volume edit [<remote>:]<pool> <volume> [--target <node>]\n"
 "    Edit storage volume, either by launching external editor or reading "
 "STDIN.\n"
 "\n"
@@ -2753,7 +2826,7 @@ msgstr ""
 msgid "User aborted delete operation."
 msgstr "L'utilisateur a annulé l'opération de suppression."
 
-#: lxc/utils.go:371
+#: lxc/utils.go:372
 msgid ""
 "User signaled us three times, exiting. The remote operation will keep "
 "running."
@@ -2775,7 +2848,7 @@ msgstr ""
 msgid "Whether or not to snapshot the container's running state"
 msgstr "Réaliser ou pas l'instantané de l'état de fonctionnement du conteneur"
 
-#: lxc/network.go:510 lxc/operation.go:141 lxc/remote.go:382 lxc/remote.go:387
+#: lxc/network.go:528 lxc/operation.go:141 lxc/remote.go:382 lxc/remote.go:387
 msgid "YES"
 msgstr "OUI"
 
@@ -2811,11 +2884,11 @@ msgstr "impossible de spécifier uid/gid/mode en mode récursif"
 msgid "default"
 msgstr "par défaut"
 
-#: lxc/storage.go:804
+#: lxc/storage.go:835
 msgid "description"
 msgstr ""
 
-#: lxc/init.go:311
+#: lxc/init.go:314
 msgid "didn't get any affected image, container or snapshot from server"
 msgstr "pas d'image, conteneur ou instantané affecté sur ce serveur"
 
@@ -2823,7 +2896,7 @@ msgstr "pas d'image, conteneur ou instantané affecté sur ce serveur"
 msgid "disabled"
 msgstr "désactivé"
 
-#: lxc/storage.go:803
+#: lxc/storage.go:834
 msgid "driver"
 msgstr ""
 
@@ -2841,13 +2914,14 @@ msgstr "erreur : %v"
 msgid "error: unknown command: %s"
 msgstr "erreur : commande inconnue: %s"
 
-#: lxc/storage.go:801
+#: lxc/storage.go:832
 msgid "info"
 msgstr ""
 
-#: lxc/storage.go:802
+#: lxc/storage.go:833
+#, fuzzy
 msgid "name"
-msgstr ""
+msgstr "Nom du réseau"
 
 #: lxc/image.go:209 lxc/image.go:583 lxc/image.go:588
 msgid "no"
@@ -2857,7 +2931,7 @@ msgstr "non"
 msgid "ok (y/n)?"
 msgstr "ok (y/n) ?"
 
-#: lxc/main.go:366 lxc/main.go:370
+#: lxc/main.go:367 lxc/main.go:371
 #, c-format
 msgid "processing aliases failed %s\n"
 msgstr "l'analyse des alias a échoué %s\n"
@@ -2886,32 +2960,32 @@ msgstr "le serveur distant %s existe en tant que <%s>"
 msgid "remote %s is static and cannot be modified"
 msgstr "le serveur distant %s est statique et ne peut être modifié"
 
-#: lxc/storage.go:806
+#: lxc/storage.go:837
 msgid "space used"
 msgstr ""
 
-#: lxc/info.go:248
+#: lxc/info.go:251
 msgid "stateful"
 msgstr "à suivi d'état"
 
-#: lxc/info.go:250
+#: lxc/info.go:253
 msgid "stateless"
 msgstr "sans suivi d'état"
 
-#: lxc/info.go:244
+#: lxc/info.go:247
 #, c-format
 msgid "taken at %s"
 msgstr "pris à %s"
 
-#: lxc/storage.go:805
+#: lxc/storage.go:836
 msgid "total space"
 msgstr ""
 
-#: lxc/storage.go:800
+#: lxc/storage.go:831
 msgid "used by"
 msgstr ""
 
-#: lxc/main.go:297
+#: lxc/main.go:298
 msgid "wrong number of subcommand arguments"
 msgstr "nombre d'arguments incorrect pour la sous-comande"
 
diff --git a/po/id.po b/po/id.po
index af6c7ce69..aabc77d09 100644
--- a/po/id.po
+++ b/po/id.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2018-01-21 21:48+0000\n"
+"POT-Creation-Date: 2018-01-24 13:34+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -16,7 +16,7 @@ msgstr ""
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
 
-#: lxc/storage.go:34
+#: lxc/storage.go:35
 msgid ""
 "### This is a yaml representation of a storage pool.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -33,7 +33,7 @@ msgid ""
 "###   zfs.pool_name: default"
 msgstr ""
 
-#: lxc/storage.go:51
+#: lxc/storage.go:52
 msgid ""
 "### This is a yaml representation of a storage volume.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -102,7 +102,7 @@ msgid ""
 "###  description: My custom image"
 msgstr ""
 
-#: lxc/network.go:30
+#: lxc/network.go:32
 msgid ""
 "### This is a yaml representation of the network.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -153,7 +153,7 @@ msgstr ""
 msgid "%s is not a directory"
 msgstr ""
 
-#: lxc/utils.go:375
+#: lxc/utils.go:376
 #, c-format
 msgid "%v (interrupt two more times to force)"
 msgstr ""
@@ -179,7 +179,7 @@ msgstr ""
 msgid "ARCH"
 msgstr ""
 
-#: lxc/list.go:463
+#: lxc/list.go:467
 msgid "ARCHITECTURE"
 msgstr ""
 
@@ -204,7 +204,7 @@ msgstr ""
 msgid "Aliases:"
 msgstr ""
 
-#: lxc/image.go:600 lxc/info.go:122
+#: lxc/image.go:600 lxc/info.go:125
 #, c-format
 msgid "Architecture: %s"
 msgstr ""
@@ -228,11 +228,11 @@ msgstr ""
 msgid "Both --all and container name given"
 msgstr ""
 
-#: lxc/info.go:215
+#: lxc/info.go:218
 msgid "Bytes received"
 msgstr ""
 
-#: lxc/info.go:216
+#: lxc/info.go:219
 msgid "Bytes sent"
 msgstr ""
 
@@ -244,11 +244,11 @@ msgstr ""
 msgid "COMMON NAME"
 msgstr ""
 
-#: lxc/info.go:179
+#: lxc/info.go:182
 msgid "CPU usage (in seconds)"
 msgstr ""
 
-#: lxc/info.go:183
+#: lxc/info.go:186
 msgid "CPU usage:"
 msgstr ""
 
@@ -256,7 +256,7 @@ msgstr ""
 msgid "CREATED"
 msgstr ""
 
-#: lxc/list.go:464
+#: lxc/list.go:468
 msgid "CREATED AT"
 msgstr ""
 
@@ -269,7 +269,7 @@ msgstr ""
 msgid "Can't pull a directory without --recursive"
 msgstr ""
 
-#: lxc/config.go:207 lxc/network.go:560
+#: lxc/config.go:207 lxc/network.go:587
 #, c-format
 msgid "Can't read from stdin: %s"
 msgstr ""
@@ -288,7 +288,7 @@ msgstr ""
 msgid "Can't unset key '%s', it's not currently set."
 msgstr ""
 
-#: lxc/profile.go:546 lxc/storage.go:663
+#: lxc/profile.go:546 lxc/storage.go:675
 msgid "Cannot provide container name to list"
 msgstr ""
 
@@ -301,7 +301,7 @@ msgstr ""
 msgid "Client certificate stored at server: "
 msgstr ""
 
-#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:130 lxc/list.go:131
+#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:134 lxc/list.go:135
 msgid "Columns"
 msgstr ""
 
@@ -309,12 +309,12 @@ msgstr ""
 msgid "Commands:"
 msgstr ""
 
-#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:137 lxc/init.go:138
+#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:138 lxc/init.go:139
 msgid "Config key/value to apply to the new container"
 msgstr ""
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:614 lxc/storage.go:1067
+#: lxc/network.go:440 lxc/profile.go:275 lxc/storage.go:626 lxc/storage.go:1138
 #, c-format
 msgid "Config parsing error: %s"
 msgstr ""
@@ -331,7 +331,7 @@ msgstr ""
 msgid "Container name is mandatory"
 msgstr ""
 
-#: lxc/copy.go:229 lxc/init.go:317
+#: lxc/copy.go:229 lxc/init.go:320
 #, c-format
 msgid "Container name is: %s"
 msgstr ""
@@ -366,26 +366,30 @@ msgstr ""
 msgid "Create any directories necessary"
 msgstr ""
 
-#: lxc/image.go:605 lxc/info.go:124
+#: lxc/image.go:605 lxc/info.go:127
 #, c-format
 msgid "Created: %s"
 msgstr ""
 
-#: lxc/init.go:196
+#: lxc/init.go:199
 #, c-format
 msgid "Creating %s"
 msgstr ""
 
-#: lxc/init.go:194
+#: lxc/init.go:197
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:465 lxc/network.go:525
-#: lxc/storage.go:692 lxc/storage.go:881
+#: lxc/cluster.go:184
+msgid "DATABASE"
+msgstr ""
+
+#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:469 lxc/network.go:547
+#: lxc/storage.go:711 lxc/storage.go:916
 msgid "DESCRIPTION"
 msgstr ""
 
-#: lxc/storage.go:693
+#: lxc/storage.go:712
 msgid "DRIVER"
 msgstr ""
 
@@ -403,7 +407,7 @@ msgstr ""
 msgid "Device %s removed from %s"
 msgstr ""
 
-#: lxc/utils.go:307 lxc/utils.go:331
+#: lxc/utils.go:308 lxc/utils.go:332
 #, c-format
 msgid "Device already exists: %s"
 msgstr ""
@@ -420,11 +424,11 @@ msgstr ""
 msgid "Disable stdin (reads from /dev/null)"
 msgstr ""
 
-#: lxc/info.go:172
+#: lxc/info.go:175
 msgid "Disk usage:"
 msgstr ""
 
-#: lxc/list.go:622
+#: lxc/list.go:638
 msgid "EPHEMERAL"
 msgstr ""
 
@@ -448,7 +452,7 @@ msgstr ""
 msgid "Environment:"
 msgstr ""
 
-#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:141 lxc/init.go:142
+#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:142 lxc/init.go:143
 msgid "Ephemeral container"
 msgstr ""
 
@@ -483,7 +487,7 @@ msgstr ""
 msgid "FINGERPRINT"
 msgstr ""
 
-#: lxc/utils.go:413
+#: lxc/utils.go:414
 #, c-format
 msgid "Failed to create alias %s"
 msgstr ""
@@ -502,7 +506,7 @@ msgstr ""
 msgid "Failed to get the new container name"
 msgstr ""
 
-#: lxc/utils.go:403
+#: lxc/utils.go:404
 #, c-format
 msgid "Failed to remove alias %s"
 msgstr ""
@@ -512,11 +516,11 @@ msgstr ""
 msgid "Failed to walk path for %s: %s"
 msgstr ""
 
-#: lxc/list.go:133
+#: lxc/list.go:137
 msgid "Fast mode (same as --columns=nsacPt)"
 msgstr ""
 
-#: lxc/network.go:486 lxc/operation.go:121
+#: lxc/network.go:504 lxc/operation.go:121
 msgid "Filtering isn't supported yet"
 msgstr ""
 
@@ -529,6 +533,10 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
+#: lxc/cluster.go:41
+msgid "Force removing a node, even if degraded"
+msgstr ""
+
 #: lxc/action.go:47 lxc/action.go:48
 msgid "Force the container to shutdown"
 msgstr ""
@@ -541,7 +549,7 @@ msgstr ""
 msgid "Force using the local unix socket"
 msgstr ""
 
-#: lxc/image.go:180 lxc/list.go:132
+#: lxc/image.go:180 lxc/list.go:136
 msgid "Format (csv|json|table|yaml)"
 msgstr ""
 
@@ -553,11 +561,11 @@ msgstr ""
 msgid "ID"
 msgstr ""
 
-#: lxc/list.go:461
+#: lxc/list.go:465
 msgid "IPV4"
 msgstr ""
 
-#: lxc/list.go:462
+#: lxc/list.go:466
 msgid "IPV6"
 msgstr ""
 
@@ -604,7 +612,7 @@ msgstr ""
 msgid "Input data"
 msgstr ""
 
-#: lxc/init.go:147
+#: lxc/init.go:148
 msgid "Instance type"
 msgstr ""
 
@@ -641,7 +649,7 @@ msgstr ""
 msgid "Invalid target %s"
 msgstr ""
 
-#: lxc/info.go:153
+#: lxc/info.go:156
 msgid "Ips:"
 msgstr ""
 
@@ -649,7 +657,7 @@ msgstr ""
 msgid "Keep the image up to date after initial copy"
 msgstr ""
 
-#: lxc/list.go:466
+#: lxc/list.go:470
 msgid "LAST USED AT"
 msgstr ""
 
@@ -666,14 +674,18 @@ msgstr ""
 msgid "Last used: never"
 msgstr ""
 
-#: lxc/info.go:268
+#: lxc/info.go:271
 msgid "Log:"
 msgstr ""
 
-#: lxc/network.go:524
+#: lxc/network.go:546
 msgid "MANAGED"
 msgstr ""
 
+#: lxc/cluster.go:186
+msgid "MESSAGE"
+msgstr ""
+
 #: lxc/image.go:176
 msgid "Make image public"
 msgstr ""
@@ -682,23 +694,23 @@ msgstr ""
 msgid "Make the image public"
 msgstr ""
 
-#: lxc/info.go:190
+#: lxc/info.go:193
 msgid "Memory (current)"
 msgstr ""
 
-#: lxc/info.go:194
+#: lxc/info.go:197
 msgid "Memory (peak)"
 msgstr ""
 
-#: lxc/info.go:206
+#: lxc/info.go:209
 msgid "Memory usage:"
 msgstr ""
 
-#: lxc/utils.go:258
+#: lxc/utils.go:259
 msgid "Missing summary."
 msgstr ""
 
-#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:412 lxc/storage.go:532
+#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:414 lxc/storage.go:544
 msgid "More than one device matches, specify the device name."
 msgstr ""
 
@@ -718,40 +730,49 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/list.go:467 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409
-#: lxc/storage.go:691 lxc/storage.go:880
+#: lxc/cluster.go:182 lxc/list.go:471 lxc/network.go:544 lxc/profile.go:573
+#: lxc/remote.go:409 lxc/storage.go:710 lxc/storage.go:915
 msgid "NAME"
 msgstr ""
 
-#: lxc/network.go:508 lxc/operation.go:139 lxc/remote.go:380 lxc/remote.go:385
+#: lxc/network.go:526 lxc/operation.go:139 lxc/remote.go:380 lxc/remote.go:385
 msgid "NO"
 msgstr ""
 
+#: lxc/list.go:492 lxc/storage.go:920
+msgid "NODE"
+msgstr ""
+
 #: lxc/info.go:117
 #, c-format
 msgid "Name: %s"
 msgstr ""
 
-#: lxc/network.go:258
+#: lxc/network.go:271
 #, c-format
 msgid "Network %s created"
 msgstr ""
 
-#: lxc/network.go:374
+#: lxc/network.go:388
 #, c-format
 msgid "Network %s deleted"
 msgstr ""
 
-#: lxc/network.go:451
+#: lxc/network.go:269
+#, c-format
+msgid "Network %s pending on node %s"
+msgstr ""
+
+#: lxc/network.go:465
 #, c-format
 msgid "Network %s renamed to %s"
 msgstr ""
 
-#: lxc/init.go:143 lxc/init.go:144
+#: lxc/init.go:144 lxc/init.go:145
 msgid "Network name"
 msgstr ""
 
-#: lxc/info.go:223
+#: lxc/info.go:226
 msgid "Network usage:"
 msgstr ""
 
@@ -763,11 +784,11 @@ msgstr ""
 msgid "No certificate provided to add"
 msgstr ""
 
-#: lxc/network.go:293 lxc/network.go:346
+#: lxc/network.go:307 lxc/network.go:360
 msgid "No device found for this network"
 msgstr ""
 
-#: lxc/storage.go:421 lxc/storage.go:541
+#: lxc/storage.go:423 lxc/storage.go:553
 msgid "No device found for this storage volume."
 msgstr ""
 
@@ -775,7 +796,26 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/storage.go:365 lxc/storage.go:458
+#: lxc/cluster.go:142
+#, c-format
+msgid "Node %s removed"
+msgstr ""
+
+#: lxc/cluster.go:118
+#, c-format
+msgid "Node %s renamed to %s"
+msgstr ""
+
+#: lxc/init.go:149 lxc/network.go:102 lxc/storage.go:158
+msgid "Node name"
+msgstr ""
+
+#: lxc/info.go:119
+#, c-format
+msgid "Node: %s"
+msgstr ""
+
+#: lxc/storage.go:367 lxc/storage.go:460
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
 
@@ -787,7 +827,7 @@ msgstr ""
 msgid "Only https:// is supported for remote image import."
 msgstr ""
 
-#: lxc/network.go:402 lxc/network.go:546
+#: lxc/network.go:416 lxc/network.go:573
 msgid "Only managed networks can be modified."
 msgstr ""
 
@@ -804,19 +844,19 @@ msgstr ""
 msgid "Override the terminal mode (auto, interactive or non-interactive)"
 msgstr ""
 
-#: lxc/list.go:624
+#: lxc/list.go:640
 msgid "PERSISTENT"
 msgstr ""
 
-#: lxc/list.go:469
+#: lxc/list.go:473
 msgid "PID"
 msgstr ""
 
-#: lxc/list.go:468
+#: lxc/list.go:472
 msgid "PROCESSES"
 msgstr ""
 
-#: lxc/list.go:470
+#: lxc/list.go:474
 msgid "PROFILES"
 msgstr ""
 
@@ -828,11 +868,11 @@ msgstr ""
 msgid "PUBLIC"
 msgstr ""
 
-#: lxc/info.go:217
+#: lxc/info.go:220
 msgid "Packets received"
 msgstr ""
 
-#: lxc/info.go:218
+#: lxc/info.go:221
 msgid "Packets sent"
 msgstr ""
 
@@ -844,7 +884,7 @@ msgstr ""
 msgid "Path to an alternate server directory"
 msgstr ""
 
-#: lxc/main.go:238
+#: lxc/main.go:239
 msgid "Pause containers."
 msgstr ""
 
@@ -852,12 +892,12 @@ msgstr ""
 msgid "Permission denied, are you in the lxd group?"
 msgstr ""
 
-#: lxc/info.go:135
+#: lxc/info.go:138
 #, c-format
 msgid "Pid: %d"
 msgstr ""
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:615 lxc/storage.go:1068
+#: lxc/network.go:441 lxc/profile.go:276 lxc/storage.go:627 lxc/storage.go:1139
 msgid "Press enter to open the editor again"
 msgstr ""
 
@@ -882,7 +922,7 @@ msgstr ""
 msgid "Print verbose information"
 msgstr ""
 
-#: lxc/info.go:159
+#: lxc/info.go:162
 #, c-format
 msgid "Processes: %d"
 msgstr ""
@@ -912,7 +952,7 @@ msgstr ""
 msgid "Profile %s renamed to %s"
 msgstr ""
 
-#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:139 lxc/init.go:140
+#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:140 lxc/init.go:141
 msgid "Profile to apply to the new container"
 msgstr ""
 
@@ -921,7 +961,7 @@ msgstr ""
 msgid "Profiles %s applied to %s"
 msgstr ""
 
-#: lxc/info.go:133
+#: lxc/info.go:136
 #, c-format
 msgid "Profiles: %s"
 msgstr ""
@@ -952,11 +992,11 @@ msgstr ""
 msgid "Remote admin password"
 msgstr ""
 
-#: lxc/utils.go:366
+#: lxc/utils.go:367
 msgid "Remote operation canceled by user"
 msgstr ""
 
-#: lxc/info.go:119
+#: lxc/info.go:122
 #, c-format
 msgid "Remote: %s"
 msgstr ""
@@ -966,7 +1006,7 @@ msgstr ""
 msgid "Remove %s (yes/no): "
 msgstr ""
 
-#: lxc/storage.go:1099
+#: lxc/storage.go:1176
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr ""
@@ -975,11 +1015,11 @@ msgstr ""
 msgid "Require user confirmation"
 msgstr ""
 
-#: lxc/info.go:156
+#: lxc/info.go:159
 msgid "Resources:"
 msgstr ""
 
-#: lxc/main.go:246
+#: lxc/main.go:247
 msgid "Restart containers."
 msgstr ""
 
@@ -987,7 +1027,7 @@ msgstr ""
 msgid "Retrieve the container's console log"
 msgstr ""
 
-#: lxc/init.go:289
+#: lxc/init.go:292
 #, c-format
 msgid "Retrieving image: %s"
 msgstr ""
@@ -1000,15 +1040,15 @@ msgstr ""
 msgid "SIZE"
 msgstr ""
 
-#: lxc/list.go:471
+#: lxc/list.go:475
 msgid "SNAPSHOTS"
 msgstr ""
 
-#: lxc/storage.go:694
+#: lxc/storage.go:717
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/list.go:472
+#: lxc/cluster.go:185 lxc/list.go:476 lxc/network.go:551 lxc/storage.go:715
 msgid "STATE"
 msgstr ""
 
@@ -1020,7 +1060,7 @@ msgstr ""
 msgid "STATUS"
 msgstr ""
 
-#: lxc/list.go:474
+#: lxc/list.go:478
 msgid "STORAGE POOL"
 msgstr ""
 
@@ -1072,11 +1112,11 @@ msgstr ""
 msgid "Show the resources available to the server"
 msgstr ""
 
-#: lxc/storage.go:155
+#: lxc/storage.go:156
 msgid "Show the resources available to the storage pool"
 msgstr ""
 
-#: lxc/storage.go:156
+#: lxc/storage.go:157
 msgid "Show the used and free space in bytes"
 msgstr ""
 
@@ -1085,7 +1125,7 @@ msgstr ""
 msgid "Size: %.2fMB"
 msgstr ""
 
-#: lxc/info.go:237
+#: lxc/info.go:240
 msgid "Snapshots:"
 msgstr ""
 
@@ -1098,7 +1138,7 @@ msgstr ""
 msgid "Source:"
 msgstr ""
 
-#: lxc/main.go:256
+#: lxc/main.go:257
 msgid "Start containers."
 msgstr ""
 
@@ -1107,12 +1147,12 @@ msgstr ""
 msgid "Starting %s"
 msgstr ""
 
-#: lxc/info.go:127
+#: lxc/info.go:130
 #, c-format
 msgid "Status: %s"
 msgstr ""
 
-#: lxc/main.go:262
+#: lxc/main.go:263
 msgid "Stop containers."
 msgstr ""
 
@@ -1129,26 +1169,31 @@ msgstr ""
 msgid "Stopping the container failed: %s"
 msgstr ""
 
-#: lxc/storage.go:506
+#: lxc/storage.go:517
 #, c-format
 msgid "Storage pool %s created"
 msgstr ""
 
-#: lxc/storage.go:565
+#: lxc/storage.go:577
 #, c-format
 msgid "Storage pool %s deleted"
 msgstr ""
 
-#: lxc/init.go:145 lxc/init.go:146
+#: lxc/storage.go:515
+#, c-format
+msgid "Storage pool %s pending on node %s"
+msgstr ""
+
+#: lxc/init.go:146 lxc/init.go:147
 msgid "Storage pool name"
 msgstr ""
 
-#: lxc/storage.go:914
+#: lxc/storage.go:959
 #, c-format
 msgid "Storage volume %s created"
 msgstr ""
 
-#: lxc/storage.go:929
+#: lxc/storage.go:979
 #, c-format
 msgid "Storage volume %s deleted"
 msgstr ""
@@ -1157,15 +1202,15 @@ msgstr ""
 msgid "Store the container state (only for stop)"
 msgstr ""
 
-#: lxc/info.go:198
+#: lxc/info.go:201
 msgid "Swap (current)"
 msgstr ""
 
-#: lxc/info.go:202
+#: lxc/info.go:205
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:473 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:879
+#: lxc/list.go:477 lxc/network.go:545 lxc/operation.go:153 lxc/storage.go:914
 msgid "TYPE"
 msgstr ""
 
@@ -1179,7 +1224,7 @@ msgid ""
 "restarted."
 msgstr ""
 
-#: lxc/init.go:362
+#: lxc/init.go:365
 msgid "The container you are starting doesn't have any network attached to it."
 msgstr ""
 
@@ -1192,7 +1237,7 @@ msgstr ""
 msgid "The device doesn't exist"
 msgstr ""
 
-#: lxc/init.go:346
+#: lxc/init.go:349
 #, c-format
 msgid "The local image '%s' couldn't be found, trying '%s:' instead."
 msgstr ""
@@ -1201,11 +1246,11 @@ msgstr ""
 msgid "The opposite of \"lxc pause\" is \"lxc start\"."
 msgstr ""
 
-#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:426 lxc/storage.go:546
+#: lxc/network.go:312 lxc/network.go:365 lxc/storage.go:428 lxc/storage.go:558
 msgid "The specified device doesn't exist"
 msgstr ""
 
-#: lxc/network.go:302 lxc/network.go:355
+#: lxc/network.go:316 lxc/network.go:369
 msgid "The specified device doesn't match the network"
 msgstr ""
 
@@ -1229,11 +1274,11 @@ msgstr ""
 msgid "Timestamps:"
 msgstr ""
 
-#: lxc/init.go:364
+#: lxc/init.go:367
 msgid "To attach a network to a container, use: lxc network attach"
 msgstr ""
 
-#: lxc/init.go:363
+#: lxc/init.go:366
 msgid "To create a new network, use: lxc network create"
 msgstr ""
 
@@ -1264,11 +1309,11 @@ msgstr ""
 msgid "Try `lxc info --show-log %s` for more info"
 msgstr ""
 
-#: lxc/info.go:129
+#: lxc/info.go:132
 msgid "Type: ephemeral"
 msgstr ""
 
-#: lxc/info.go:131
+#: lxc/info.go:134
 msgid "Type: persistent"
 msgstr ""
 
@@ -1276,11 +1321,11 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/remote.go:410
+#: lxc/cluster.go:183 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:695 lxc/storage.go:882
+#: lxc/network.go:548 lxc/profile.go:574 lxc/storage.go:719 lxc/storage.go:917
 msgid "USED BY"
 msgstr ""
 
@@ -1310,6 +1355,25 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
+#: lxc/cluster.go:22
+msgid ""
+"Usage: lxc cluster <subcommand> [options]\n"
+"\n"
+"Manage cluster nodes.\n"
+"\n"
+"lxc cluster list [<remote>:]\n"
+"    List all nodes in the cluster.\n"
+"\n"
+"lxc cluster show [<remote>:]<node>\n"
+"    Show details of a node.\n"
+"\n"
+"lxc cluster rename [<remote>:]<node> <new-name>\n"
+"    Rename a cluster node.\n"
+"\n"
+"lxc cluster delete [<remote>:]<node> [--force]\n"
+"    Delete a node from the cluster."
+msgstr ""
+
 #: lxc/config.go:85
 msgid ""
 "Usage: lxc config <subcommand> [options]\n"
@@ -1626,11 +1690,12 @@ msgid ""
 "    For LXD server information."
 msgstr ""
 
-#: lxc/init.go:77
+#: lxc/init.go:78
 msgid ""
 "Usage: lxc init [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create containers from images.\n"
 "\n"
@@ -1645,7 +1710,8 @@ msgstr ""
 msgid ""
 "Usage: lxc launch [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create and start containers from images.\n"
 "\n"
@@ -1726,6 +1792,8 @@ msgid ""
 "\n"
 "\tt - Type (persistent or ephemeral)\n"
 "\n"
+"\tH - Node hosting the container\n"
+"\n"
 "Custom columns are defined with \"key[:name][:maxWidth]\":\n"
 "\n"
 "\tKEY: The (extended) config key to display\n"
@@ -1791,7 +1859,7 @@ msgid ""
 "    Rename a snapshot."
 msgstr ""
 
-#: lxc/network.go:50
+#: lxc/network.go:52
 msgid ""
 "Usage: lxc network <subcommand> [options]\n"
 "\n"
@@ -1800,13 +1868,13 @@ msgid ""
 "lxc network list [<remote>:]\n"
 "    List available networks.\n"
 "\n"
-"lxc network show [<remote>:]<network>\n"
+"lxc network show [<remote>:]<network> [--target <node>]\n"
 "    Show details of a network.\n"
 "\n"
-"lxc network create [<remote>:]<network> [key=value...]\n"
+"lxc network create [<remote>:]<network> [key=value...] [--target <node>]\n"
 "    Create a network.\n"
 "\n"
-"lxc network get [<remote>:]<network> <key>\n"
+"lxc network get [<remote>:]<network> <key> [--target <node>]\n"
 "    Get network configuration.\n"
 "\n"
 "lxc network set [<remote>:]<network> <key> <value>\n"
@@ -2039,7 +2107,7 @@ msgid ""
 "    Create a snapshot of \"u1\" called \"snap0\"."
 msgstr ""
 
-#: lxc/storage.go:65
+#: lxc/storage.go:66
 msgid ""
 "Usage: lxc storage <subcommand> [options]\n"
 "\n"
@@ -2049,13 +2117,14 @@ msgid ""
 "lxc storage list [<remote>:]\n"
 "    List available storage pools.\n"
 "\n"
-"lxc storage show [<remote>:]<pool> [--resources]\n"
+"lxc storage show [<remote>:]<pool> [--resources] [--target <node>]\n"
 "    Show details of a storage pool.\n"
 "\n"
 "lxc storage info [<remote>:]<pool> [--bytes]\n"
 "    Show information of a storage pool in yaml format.\n"
 "\n"
-"lxc storage create [<remote>:]<pool> <driver> [key=value]...\n"
+"lxc storage create [<remote>:]<pool> <driver> [key=value]... [--target "
+"<node>]\n"
 "    Create a storage pool.\n"
 "\n"
 "lxc storage get [<remote>:]<pool> <key>\n"
@@ -2078,28 +2147,31 @@ msgid ""
 "lxc storage volume list [<remote>:]<pool>\n"
 "    List available storage volumes on a storage pool.\n"
 "\n"
-"lxc storage volume show [<remote>:]<pool> <volume>\n"
-"    Show details of a storage volume on a storage pool.\n"
+"lxc storage volume show [<remote>:]<pool> <volume> [--target <node>]\n"
+"   Show details of a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume create [<remote>:]<pool> <volume> [key=value]...\n"
+"lxc storage volume create [<remote>:]<pool> <volume> [key=value]... [--"
+"target <node>]\n"
 "    Create a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume rename [<remote>:]<pool> <old name> <new name>\n"
+"lxc storage volume rename [<remote>:]<pool> <old name> <new name> [--target "
+"<node>]\n"
 "    Rename a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume get [<remote>:]<pool> <volume> <key>\n"
+"lxc storage volume get [<remote>:]<pool> <volume> <key> [--target <node>]\n"
 "    Get storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume set [<remote>:]<pool> <volume> <key> <value>\n"
+"lxc storage volume set [<remote>:]<pool> <volume> <key> <value> [--target "
+"<node>]\n"
 "    Set storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume unset [<remote>:]<pool> <volume> <key>\n"
+"lxc storage volume unset [<remote>:]<pool> <volume> <key> [--target <node>]\n"
 "    Unset storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume delete [<remote>:]<pool> <volume>\n"
+"lxc storage volume delete [<remote>:]<pool> <volume> [--target <node>]\n"
 "    Delete a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume edit [<remote>:]<pool> <volume>\n"
+"lxc storage volume edit [<remote>:]<pool> <volume> [--target <node>]\n"
 "    Edit storage volume, either by launching external editor or reading "
 "STDIN.\n"
 "\n"
@@ -2149,7 +2221,7 @@ msgstr ""
 msgid "User aborted delete operation."
 msgstr ""
 
-#: lxc/utils.go:371
+#: lxc/utils.go:372
 msgid ""
 "User signaled us three times, exiting. The remote operation will keep "
 "running."
@@ -2169,7 +2241,7 @@ msgstr ""
 msgid "Whether or not to snapshot the container's running state"
 msgstr ""
 
-#: lxc/network.go:510 lxc/operation.go:141 lxc/remote.go:382 lxc/remote.go:387
+#: lxc/network.go:528 lxc/operation.go:141 lxc/remote.go:382 lxc/remote.go:387
 msgid "YES"
 msgstr ""
 
@@ -2201,11 +2273,11 @@ msgstr ""
 msgid "default"
 msgstr ""
 
-#: lxc/storage.go:804
+#: lxc/storage.go:835
 msgid "description"
 msgstr ""
 
-#: lxc/init.go:311
+#: lxc/init.go:314
 msgid "didn't get any affected image, container or snapshot from server"
 msgstr ""
 
@@ -2213,7 +2285,7 @@ msgstr ""
 msgid "disabled"
 msgstr ""
 
-#: lxc/storage.go:803
+#: lxc/storage.go:834
 msgid "driver"
 msgstr ""
 
@@ -2231,11 +2303,11 @@ msgstr ""
 msgid "error: unknown command: %s"
 msgstr ""
 
-#: lxc/storage.go:801
+#: lxc/storage.go:832
 msgid "info"
 msgstr ""
 
-#: lxc/storage.go:802
+#: lxc/storage.go:833
 msgid "name"
 msgstr ""
 
@@ -2247,7 +2319,7 @@ msgstr ""
 msgid "ok (y/n)?"
 msgstr ""
 
-#: lxc/main.go:366 lxc/main.go:370
+#: lxc/main.go:367 lxc/main.go:371
 #, c-format
 msgid "processing aliases failed %s\n"
 msgstr ""
@@ -2276,32 +2348,32 @@ msgstr ""
 msgid "remote %s is static and cannot be modified"
 msgstr ""
 
-#: lxc/storage.go:806
+#: lxc/storage.go:837
 msgid "space used"
 msgstr ""
 
-#: lxc/info.go:248
+#: lxc/info.go:251
 msgid "stateful"
 msgstr ""
 
-#: lxc/info.go:250
+#: lxc/info.go:253
 msgid "stateless"
 msgstr ""
 
-#: lxc/info.go:244
+#: lxc/info.go:247
 #, c-format
 msgid "taken at %s"
 msgstr ""
 
-#: lxc/storage.go:805
+#: lxc/storage.go:836
 msgid "total space"
 msgstr ""
 
-#: lxc/storage.go:800
+#: lxc/storage.go:831
 msgid "used by"
 msgstr ""
 
-#: lxc/main.go:297
+#: lxc/main.go:298
 msgid "wrong number of subcommand arguments"
 msgstr ""
 
diff --git a/po/it.po b/po/it.po
index a34c108c4..8ac181d62 100644
--- a/po/it.po
+++ b/po/it.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2018-01-21 21:48+0000\n"
+"POT-Creation-Date: 2018-01-24 13:34+0000\n"
 "PO-Revision-Date: 2017-08-18 14:22+0000\n"
 "Last-Translator: Alberto Donato <alberto.donato at gmail.com>\n"
 "Language-Team: Italian <https://hosted.weblate.org/projects/linux-containers/"
@@ -19,7 +19,7 @@ msgstr ""
 "Plural-Forms: nplurals=2; plural=n != 1;\n"
 "X-Generator: Weblate 2.17-dev\n"
 
-#: lxc/storage.go:34
+#: lxc/storage.go:35
 msgid ""
 "### This is a yaml representation of a storage pool.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -50,7 +50,7 @@ msgstr ""
 "###   source: /home/chb/mnt/lxd_test/default.img\n"
 "###   zfs.pool_name: default"
 
-#: lxc/storage.go:51
+#: lxc/storage.go:52
 msgid ""
 "### This is a yaml representation of a storage volume.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -126,7 +126,7 @@ msgstr ""
 "### Un esempio è il seguente:\n"
 "###  description: My custom image"
 
-#: lxc/network.go:30
+#: lxc/network.go:32
 msgid ""
 "### This is a yaml representation of the network.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -177,7 +177,7 @@ msgstr "%s (altri %d)"
 msgid "%s is not a directory"
 msgstr ""
 
-#: lxc/utils.go:375
+#: lxc/utils.go:376
 #, c-format
 msgid "%v (interrupt two more times to force)"
 msgstr "%v (interrompi altre due volte per forzare)"
@@ -203,7 +203,7 @@ msgstr "ALIAS"
 msgid "ARCH"
 msgstr "ARCH"
 
-#: lxc/list.go:463
+#: lxc/list.go:467
 msgid "ARCHITECTURE"
 msgstr "ARCHITETTURA"
 
@@ -228,7 +228,7 @@ msgstr "Password amministratore per %s: "
 msgid "Aliases:"
 msgstr "Alias:"
 
-#: lxc/image.go:600 lxc/info.go:122
+#: lxc/image.go:600 lxc/info.go:125
 #, c-format
 msgid "Architecture: %s"
 msgstr "Architettura: %s"
@@ -252,11 +252,11 @@ msgstr "Proprietà errata: %s"
 msgid "Both --all and container name given"
 msgstr ""
 
-#: lxc/info.go:215
+#: lxc/info.go:218
 msgid "Bytes received"
 msgstr "Bytes ricevuti"
 
-#: lxc/info.go:216
+#: lxc/info.go:219
 msgid "Bytes sent"
 msgstr "Byte inviati"
 
@@ -268,11 +268,11 @@ msgstr ""
 msgid "COMMON NAME"
 msgstr "NOME COMUNE"
 
-#: lxc/info.go:179
+#: lxc/info.go:182
 msgid "CPU usage (in seconds)"
 msgstr "Utilizzo CPU (in secondi)"
 
-#: lxc/info.go:183
+#: lxc/info.go:186
 msgid "CPU usage:"
 msgstr "Utilizzo CPU:"
 
@@ -281,7 +281,7 @@ msgstr "Utilizzo CPU:"
 msgid "CREATED"
 msgstr "CREATO IL"
 
-#: lxc/list.go:464
+#: lxc/list.go:468
 msgid "CREATED AT"
 msgstr "CREATO IL"
 
@@ -294,7 +294,7 @@ msgstr ""
 msgid "Can't pull a directory without --recursive"
 msgstr "Impossibile effettuare il pull di una directory senza --recursive"
 
-#: lxc/config.go:207 lxc/network.go:560
+#: lxc/config.go:207 lxc/network.go:587
 #, c-format
 msgid "Can't read from stdin: %s"
 msgstr "Impossible leggere da stdin: %s"
@@ -313,7 +313,7 @@ msgstr ""
 msgid "Can't unset key '%s', it's not currently set."
 msgstr ""
 
-#: lxc/profile.go:546 lxc/storage.go:663
+#: lxc/profile.go:546 lxc/storage.go:675
 msgid "Cannot provide container name to list"
 msgstr ""
 
@@ -326,7 +326,7 @@ msgstr ""
 msgid "Client certificate stored at server: "
 msgstr "Certificato del client salvato dal server: "
 
-#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:130 lxc/list.go:131
+#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:134 lxc/list.go:135
 msgid "Columns"
 msgstr "Colonne"
 
@@ -334,12 +334,12 @@ msgstr "Colonne"
 msgid "Commands:"
 msgstr "Comandi:"
 
-#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:137 lxc/init.go:138
+#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:138 lxc/init.go:139
 msgid "Config key/value to apply to the new container"
 msgstr ""
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:614 lxc/storage.go:1067
+#: lxc/network.go:440 lxc/profile.go:275 lxc/storage.go:626 lxc/storage.go:1138
 #, c-format
 msgid "Config parsing error: %s"
 msgstr ""
@@ -356,7 +356,7 @@ msgstr ""
 msgid "Container name is mandatory"
 msgstr ""
 
-#: lxc/copy.go:229 lxc/init.go:317
+#: lxc/copy.go:229 lxc/init.go:320
 #, c-format
 msgid "Container name is: %s"
 msgstr "Il nome del container è: %s"
@@ -391,26 +391,30 @@ msgstr ""
 msgid "Create any directories necessary"
 msgstr ""
 
-#: lxc/image.go:605 lxc/info.go:124
+#: lxc/image.go:605 lxc/info.go:127
 #, c-format
 msgid "Created: %s"
 msgstr ""
 
-#: lxc/init.go:196
+#: lxc/init.go:199
 #, c-format
 msgid "Creating %s"
 msgstr "Creazione di %s in corso"
 
-#: lxc/init.go:194
+#: lxc/init.go:197
 msgid "Creating the container"
 msgstr "Creazione del container in corso"
 
-#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:465 lxc/network.go:525
-#: lxc/storage.go:692 lxc/storage.go:881
+#: lxc/cluster.go:184
+msgid "DATABASE"
+msgstr ""
+
+#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:469 lxc/network.go:547
+#: lxc/storage.go:711 lxc/storage.go:916
 msgid "DESCRIPTION"
 msgstr "DESCRIZIONE"
 
-#: lxc/storage.go:693
+#: lxc/storage.go:712
 msgid "DRIVER"
 msgstr "DRIVER"
 
@@ -428,7 +432,7 @@ msgstr ""
 msgid "Device %s removed from %s"
 msgstr ""
 
-#: lxc/utils.go:307 lxc/utils.go:331
+#: lxc/utils.go:308 lxc/utils.go:332
 #, c-format
 msgid "Device already exists: %s"
 msgstr "La periferica esiste già: %s"
@@ -445,11 +449,11 @@ msgstr ""
 msgid "Disable stdin (reads from /dev/null)"
 msgstr ""
 
-#: lxc/info.go:172
+#: lxc/info.go:175
 msgid "Disk usage:"
 msgstr "Utilizzo disco:"
 
-#: lxc/list.go:622
+#: lxc/list.go:638
 msgid "EPHEMERAL"
 msgstr ""
 
@@ -473,7 +477,7 @@ msgstr ""
 msgid "Environment:"
 msgstr ""
 
-#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:141 lxc/init.go:142
+#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:142 lxc/init.go:143
 msgid "Ephemeral container"
 msgstr ""
 
@@ -508,7 +512,7 @@ msgstr ""
 msgid "FINGERPRINT"
 msgstr ""
 
-#: lxc/utils.go:413
+#: lxc/utils.go:414
 #, c-format
 msgid "Failed to create alias %s"
 msgstr ""
@@ -527,7 +531,7 @@ msgstr ""
 msgid "Failed to get the new container name"
 msgstr ""
 
-#: lxc/utils.go:403
+#: lxc/utils.go:404
 #, c-format
 msgid "Failed to remove alias %s"
 msgstr ""
@@ -537,11 +541,11 @@ msgstr ""
 msgid "Failed to walk path for %s: %s"
 msgstr ""
 
-#: lxc/list.go:133
+#: lxc/list.go:137
 msgid "Fast mode (same as --columns=nsacPt)"
 msgstr ""
 
-#: lxc/network.go:486 lxc/operation.go:121
+#: lxc/network.go:504 lxc/operation.go:121
 #, fuzzy
 msgid "Filtering isn't supported yet"
 msgstr "'%s' non è un tipo di file supportato."
@@ -555,6 +559,10 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
+#: lxc/cluster.go:41
+msgid "Force removing a node, even if degraded"
+msgstr ""
+
 #: lxc/action.go:47 lxc/action.go:48
 msgid "Force the container to shutdown"
 msgstr ""
@@ -567,7 +575,7 @@ msgstr ""
 msgid "Force using the local unix socket"
 msgstr ""
 
-#: lxc/image.go:180 lxc/list.go:132
+#: lxc/image.go:180 lxc/list.go:136
 msgid "Format (csv|json|table|yaml)"
 msgstr ""
 
@@ -579,11 +587,11 @@ msgstr ""
 msgid "ID"
 msgstr ""
 
-#: lxc/list.go:461
+#: lxc/list.go:465
 msgid "IPV4"
 msgstr ""
 
-#: lxc/list.go:462
+#: lxc/list.go:466
 msgid "IPV6"
 msgstr ""
 
@@ -630,7 +638,7 @@ msgstr ""
 msgid "Input data"
 msgstr ""
 
-#: lxc/init.go:147
+#: lxc/init.go:148
 msgid "Instance type"
 msgstr ""
 
@@ -667,7 +675,7 @@ msgstr ""
 msgid "Invalid target %s"
 msgstr ""
 
-#: lxc/info.go:153
+#: lxc/info.go:156
 msgid "Ips:"
 msgstr ""
 
@@ -675,7 +683,7 @@ msgstr ""
 msgid "Keep the image up to date after initial copy"
 msgstr ""
 
-#: lxc/list.go:466
+#: lxc/list.go:470
 msgid "LAST USED AT"
 msgstr ""
 
@@ -692,14 +700,18 @@ msgstr ""
 msgid "Last used: never"
 msgstr ""
 
-#: lxc/info.go:268
+#: lxc/info.go:271
 msgid "Log:"
 msgstr ""
 
-#: lxc/network.go:524
+#: lxc/network.go:546
 msgid "MANAGED"
 msgstr ""
 
+#: lxc/cluster.go:186
+msgid "MESSAGE"
+msgstr ""
+
 #: lxc/image.go:176
 msgid "Make image public"
 msgstr ""
@@ -708,23 +720,23 @@ msgstr ""
 msgid "Make the image public"
 msgstr ""
 
-#: lxc/info.go:190
+#: lxc/info.go:193
 msgid "Memory (current)"
 msgstr ""
 
-#: lxc/info.go:194
+#: lxc/info.go:197
 msgid "Memory (peak)"
 msgstr ""
 
-#: lxc/info.go:206
+#: lxc/info.go:209
 msgid "Memory usage:"
 msgstr ""
 
-#: lxc/utils.go:258
+#: lxc/utils.go:259
 msgid "Missing summary."
 msgstr ""
 
-#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:412 lxc/storage.go:532
+#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:414 lxc/storage.go:544
 msgid "More than one device matches, specify the device name."
 msgstr ""
 
@@ -744,40 +756,49 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/list.go:467 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409
-#: lxc/storage.go:691 lxc/storage.go:880
+#: lxc/cluster.go:182 lxc/list.go:471 lxc/network.go:544 lxc/profile.go:573
+#: lxc/remote.go:409 lxc/storage.go:710 lxc/storage.go:915
 msgid "NAME"
 msgstr ""
 
-#: lxc/network.go:508 lxc/operation.go:139 lxc/remote.go:380 lxc/remote.go:385
+#: lxc/network.go:526 lxc/operation.go:139 lxc/remote.go:380 lxc/remote.go:385
 msgid "NO"
 msgstr ""
 
+#: lxc/list.go:492 lxc/storage.go:920
+msgid "NODE"
+msgstr ""
+
 #: lxc/info.go:117
 #, c-format
 msgid "Name: %s"
 msgstr ""
 
-#: lxc/network.go:258
+#: lxc/network.go:271
 #, c-format
 msgid "Network %s created"
 msgstr ""
 
-#: lxc/network.go:374
+#: lxc/network.go:388
 #, c-format
 msgid "Network %s deleted"
 msgstr ""
 
-#: lxc/network.go:451
+#: lxc/network.go:269
+#, c-format
+msgid "Network %s pending on node %s"
+msgstr ""
+
+#: lxc/network.go:465
 #, c-format
 msgid "Network %s renamed to %s"
 msgstr ""
 
-#: lxc/init.go:143 lxc/init.go:144
+#: lxc/init.go:144 lxc/init.go:145
 msgid "Network name"
 msgstr ""
 
-#: lxc/info.go:223
+#: lxc/info.go:226
 msgid "Network usage:"
 msgstr ""
 
@@ -789,11 +810,11 @@ msgstr ""
 msgid "No certificate provided to add"
 msgstr ""
 
-#: lxc/network.go:293 lxc/network.go:346
+#: lxc/network.go:307 lxc/network.go:360
 msgid "No device found for this network"
 msgstr ""
 
-#: lxc/storage.go:421 lxc/storage.go:541
+#: lxc/storage.go:423 lxc/storage.go:553
 msgid "No device found for this storage volume."
 msgstr ""
 
@@ -801,7 +822,26 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/storage.go:365 lxc/storage.go:458
+#: lxc/cluster.go:142
+#, c-format
+msgid "Node %s removed"
+msgstr ""
+
+#: lxc/cluster.go:118
+#, c-format
+msgid "Node %s renamed to %s"
+msgstr ""
+
+#: lxc/init.go:149 lxc/network.go:102 lxc/storage.go:158
+msgid "Node name"
+msgstr ""
+
+#: lxc/info.go:119
+#, fuzzy, c-format
+msgid "Node: %s"
+msgstr "Aggiornamento automatico: %s"
+
+#: lxc/storage.go:367 lxc/storage.go:460
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
 
@@ -813,7 +853,7 @@ msgstr ""
 msgid "Only https:// is supported for remote image import."
 msgstr ""
 
-#: lxc/network.go:402 lxc/network.go:546
+#: lxc/network.go:416 lxc/network.go:573
 msgid "Only managed networks can be modified."
 msgstr ""
 
@@ -830,19 +870,19 @@ msgstr ""
 msgid "Override the terminal mode (auto, interactive or non-interactive)"
 msgstr ""
 
-#: lxc/list.go:624
+#: lxc/list.go:640
 msgid "PERSISTENT"
 msgstr ""
 
-#: lxc/list.go:469
+#: lxc/list.go:473
 msgid "PID"
 msgstr ""
 
-#: lxc/list.go:468
+#: lxc/list.go:472
 msgid "PROCESSES"
 msgstr ""
 
-#: lxc/list.go:470
+#: lxc/list.go:474
 msgid "PROFILES"
 msgstr ""
 
@@ -854,11 +894,11 @@ msgstr ""
 msgid "PUBLIC"
 msgstr ""
 
-#: lxc/info.go:217
+#: lxc/info.go:220
 msgid "Packets received"
 msgstr ""
 
-#: lxc/info.go:218
+#: lxc/info.go:221
 msgid "Packets sent"
 msgstr ""
 
@@ -870,7 +910,7 @@ msgstr ""
 msgid "Path to an alternate server directory"
 msgstr ""
 
-#: lxc/main.go:238
+#: lxc/main.go:239
 msgid "Pause containers."
 msgstr ""
 
@@ -878,12 +918,12 @@ msgstr ""
 msgid "Permission denied, are you in the lxd group?"
 msgstr ""
 
-#: lxc/info.go:135
+#: lxc/info.go:138
 #, c-format
 msgid "Pid: %d"
 msgstr ""
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:615 lxc/storage.go:1068
+#: lxc/network.go:441 lxc/profile.go:276 lxc/storage.go:627 lxc/storage.go:1139
 msgid "Press enter to open the editor again"
 msgstr ""
 
@@ -908,7 +948,7 @@ msgstr ""
 msgid "Print verbose information"
 msgstr ""
 
-#: lxc/info.go:159
+#: lxc/info.go:162
 #, c-format
 msgid "Processes: %d"
 msgstr ""
@@ -938,7 +978,7 @@ msgstr ""
 msgid "Profile %s renamed to %s"
 msgstr ""
 
-#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:139 lxc/init.go:140
+#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:140 lxc/init.go:141
 msgid "Profile to apply to the new container"
 msgstr ""
 
@@ -947,7 +987,7 @@ msgstr ""
 msgid "Profiles %s applied to %s"
 msgstr ""
 
-#: lxc/info.go:133
+#: lxc/info.go:136
 #, c-format
 msgid "Profiles: %s"
 msgstr ""
@@ -978,11 +1018,11 @@ msgstr ""
 msgid "Remote admin password"
 msgstr ""
 
-#: lxc/utils.go:366
+#: lxc/utils.go:367
 msgid "Remote operation canceled by user"
 msgstr ""
 
-#: lxc/info.go:119
+#: lxc/info.go:122
 #, c-format
 msgid "Remote: %s"
 msgstr ""
@@ -992,7 +1032,7 @@ msgstr ""
 msgid "Remove %s (yes/no): "
 msgstr ""
 
-#: lxc/storage.go:1099
+#: lxc/storage.go:1176
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr ""
@@ -1001,11 +1041,11 @@ msgstr ""
 msgid "Require user confirmation"
 msgstr ""
 
-#: lxc/info.go:156
+#: lxc/info.go:159
 msgid "Resources:"
 msgstr ""
 
-#: lxc/main.go:246
+#: lxc/main.go:247
 msgid "Restart containers."
 msgstr ""
 
@@ -1013,7 +1053,7 @@ msgstr ""
 msgid "Retrieve the container's console log"
 msgstr ""
 
-#: lxc/init.go:289
+#: lxc/init.go:292
 #, c-format
 msgid "Retrieving image: %s"
 msgstr ""
@@ -1026,15 +1066,15 @@ msgstr ""
 msgid "SIZE"
 msgstr ""
 
-#: lxc/list.go:471
+#: lxc/list.go:475
 msgid "SNAPSHOTS"
 msgstr ""
 
-#: lxc/storage.go:694
+#: lxc/storage.go:717
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/list.go:472
+#: lxc/cluster.go:185 lxc/list.go:476 lxc/network.go:551 lxc/storage.go:715
 msgid "STATE"
 msgstr ""
 
@@ -1046,7 +1086,7 @@ msgstr ""
 msgid "STATUS"
 msgstr ""
 
-#: lxc/list.go:474
+#: lxc/list.go:478
 msgid "STORAGE POOL"
 msgstr ""
 
@@ -1098,11 +1138,11 @@ msgstr ""
 msgid "Show the resources available to the server"
 msgstr ""
 
-#: lxc/storage.go:155
+#: lxc/storage.go:156
 msgid "Show the resources available to the storage pool"
 msgstr ""
 
-#: lxc/storage.go:156
+#: lxc/storage.go:157
 msgid "Show the used and free space in bytes"
 msgstr ""
 
@@ -1111,7 +1151,7 @@ msgstr ""
 msgid "Size: %.2fMB"
 msgstr ""
 
-#: lxc/info.go:237
+#: lxc/info.go:240
 msgid "Snapshots:"
 msgstr ""
 
@@ -1124,7 +1164,7 @@ msgstr ""
 msgid "Source:"
 msgstr ""
 
-#: lxc/main.go:256
+#: lxc/main.go:257
 msgid "Start containers."
 msgstr ""
 
@@ -1133,12 +1173,12 @@ msgstr ""
 msgid "Starting %s"
 msgstr ""
 
-#: lxc/info.go:127
+#: lxc/info.go:130
 #, c-format
 msgid "Status: %s"
 msgstr ""
 
-#: lxc/main.go:262
+#: lxc/main.go:263
 msgid "Stop containers."
 msgstr ""
 
@@ -1155,26 +1195,31 @@ msgstr ""
 msgid "Stopping the container failed: %s"
 msgstr ""
 
-#: lxc/storage.go:506
+#: lxc/storage.go:517
 #, c-format
 msgid "Storage pool %s created"
 msgstr ""
 
-#: lxc/storage.go:565
+#: lxc/storage.go:577
 #, c-format
 msgid "Storage pool %s deleted"
 msgstr ""
 
-#: lxc/init.go:145 lxc/init.go:146
+#: lxc/storage.go:515
+#, c-format
+msgid "Storage pool %s pending on node %s"
+msgstr ""
+
+#: lxc/init.go:146 lxc/init.go:147
 msgid "Storage pool name"
 msgstr ""
 
-#: lxc/storage.go:914
+#: lxc/storage.go:959
 #, c-format
 msgid "Storage volume %s created"
 msgstr ""
 
-#: lxc/storage.go:929
+#: lxc/storage.go:979
 #, c-format
 msgid "Storage volume %s deleted"
 msgstr ""
@@ -1183,15 +1228,15 @@ msgstr ""
 msgid "Store the container state (only for stop)"
 msgstr ""
 
-#: lxc/info.go:198
+#: lxc/info.go:201
 msgid "Swap (current)"
 msgstr ""
 
-#: lxc/info.go:202
+#: lxc/info.go:205
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:473 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:879
+#: lxc/list.go:477 lxc/network.go:545 lxc/operation.go:153 lxc/storage.go:914
 msgid "TYPE"
 msgstr ""
 
@@ -1205,7 +1250,7 @@ msgid ""
 "restarted."
 msgstr ""
 
-#: lxc/init.go:362
+#: lxc/init.go:365
 msgid "The container you are starting doesn't have any network attached to it."
 msgstr ""
 
@@ -1218,7 +1263,7 @@ msgstr "La periferica esiste già"
 msgid "The device doesn't exist"
 msgstr ""
 
-#: lxc/init.go:346
+#: lxc/init.go:349
 #, c-format
 msgid "The local image '%s' couldn't be found, trying '%s:' instead."
 msgstr ""
@@ -1227,11 +1272,11 @@ msgstr ""
 msgid "The opposite of \"lxc pause\" is \"lxc start\"."
 msgstr ""
 
-#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:426 lxc/storage.go:546
+#: lxc/network.go:312 lxc/network.go:365 lxc/storage.go:428 lxc/storage.go:558
 msgid "The specified device doesn't exist"
 msgstr ""
 
-#: lxc/network.go:302 lxc/network.go:355
+#: lxc/network.go:316 lxc/network.go:369
 msgid "The specified device doesn't match the network"
 msgstr ""
 
@@ -1255,11 +1300,11 @@ msgstr ""
 msgid "Timestamps:"
 msgstr ""
 
-#: lxc/init.go:364
+#: lxc/init.go:367
 msgid "To attach a network to a container, use: lxc network attach"
 msgstr ""
 
-#: lxc/init.go:363
+#: lxc/init.go:366
 msgid "To create a new network, use: lxc network create"
 msgstr ""
 
@@ -1290,11 +1335,11 @@ msgstr ""
 msgid "Try `lxc info --show-log %s` for more info"
 msgstr ""
 
-#: lxc/info.go:129
+#: lxc/info.go:132
 msgid "Type: ephemeral"
 msgstr ""
 
-#: lxc/info.go:131
+#: lxc/info.go:134
 msgid "Type: persistent"
 msgstr ""
 
@@ -1302,11 +1347,11 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/remote.go:410
+#: lxc/cluster.go:183 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:695 lxc/storage.go:882
+#: lxc/network.go:548 lxc/profile.go:574 lxc/storage.go:719 lxc/storage.go:917
 msgid "USED BY"
 msgstr ""
 
@@ -1336,6 +1381,25 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
+#: lxc/cluster.go:22
+msgid ""
+"Usage: lxc cluster <subcommand> [options]\n"
+"\n"
+"Manage cluster nodes.\n"
+"\n"
+"lxc cluster list [<remote>:]\n"
+"    List all nodes in the cluster.\n"
+"\n"
+"lxc cluster show [<remote>:]<node>\n"
+"    Show details of a node.\n"
+"\n"
+"lxc cluster rename [<remote>:]<node> <new-name>\n"
+"    Rename a cluster node.\n"
+"\n"
+"lxc cluster delete [<remote>:]<node> [--force]\n"
+"    Delete a node from the cluster."
+msgstr ""
+
 #: lxc/config.go:85
 msgid ""
 "Usage: lxc config <subcommand> [options]\n"
@@ -1652,11 +1716,12 @@ msgid ""
 "    For LXD server information."
 msgstr ""
 
-#: lxc/init.go:77
+#: lxc/init.go:78
 msgid ""
 "Usage: lxc init [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create containers from images.\n"
 "\n"
@@ -1671,7 +1736,8 @@ msgstr ""
 msgid ""
 "Usage: lxc launch [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create and start containers from images.\n"
 "\n"
@@ -1752,6 +1818,8 @@ msgid ""
 "\n"
 "\tt - Type (persistent or ephemeral)\n"
 "\n"
+"\tH - Node hosting the container\n"
+"\n"
 "Custom columns are defined with \"key[:name][:maxWidth]\":\n"
 "\n"
 "\tKEY: The (extended) config key to display\n"
@@ -1817,7 +1885,7 @@ msgid ""
 "    Rename a snapshot."
 msgstr ""
 
-#: lxc/network.go:50
+#: lxc/network.go:52
 msgid ""
 "Usage: lxc network <subcommand> [options]\n"
 "\n"
@@ -1826,13 +1894,13 @@ msgid ""
 "lxc network list [<remote>:]\n"
 "    List available networks.\n"
 "\n"
-"lxc network show [<remote>:]<network>\n"
+"lxc network show [<remote>:]<network> [--target <node>]\n"
 "    Show details of a network.\n"
 "\n"
-"lxc network create [<remote>:]<network> [key=value...]\n"
+"lxc network create [<remote>:]<network> [key=value...] [--target <node>]\n"
 "    Create a network.\n"
 "\n"
-"lxc network get [<remote>:]<network> <key>\n"
+"lxc network get [<remote>:]<network> <key> [--target <node>]\n"
 "    Get network configuration.\n"
 "\n"
 "lxc network set [<remote>:]<network> <key> <value>\n"
@@ -2065,7 +2133,7 @@ msgid ""
 "    Create a snapshot of \"u1\" called \"snap0\"."
 msgstr ""
 
-#: lxc/storage.go:65
+#: lxc/storage.go:66
 msgid ""
 "Usage: lxc storage <subcommand> [options]\n"
 "\n"
@@ -2075,13 +2143,14 @@ msgid ""
 "lxc storage list [<remote>:]\n"
 "    List available storage pools.\n"
 "\n"
-"lxc storage show [<remote>:]<pool> [--resources]\n"
+"lxc storage show [<remote>:]<pool> [--resources] [--target <node>]\n"
 "    Show details of a storage pool.\n"
 "\n"
 "lxc storage info [<remote>:]<pool> [--bytes]\n"
 "    Show information of a storage pool in yaml format.\n"
 "\n"
-"lxc storage create [<remote>:]<pool> <driver> [key=value]...\n"
+"lxc storage create [<remote>:]<pool> <driver> [key=value]... [--target "
+"<node>]\n"
 "    Create a storage pool.\n"
 "\n"
 "lxc storage get [<remote>:]<pool> <key>\n"
@@ -2104,28 +2173,31 @@ msgid ""
 "lxc storage volume list [<remote>:]<pool>\n"
 "    List available storage volumes on a storage pool.\n"
 "\n"
-"lxc storage volume show [<remote>:]<pool> <volume>\n"
-"    Show details of a storage volume on a storage pool.\n"
+"lxc storage volume show [<remote>:]<pool> <volume> [--target <node>]\n"
+"   Show details of a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume create [<remote>:]<pool> <volume> [key=value]...\n"
+"lxc storage volume create [<remote>:]<pool> <volume> [key=value]... [--"
+"target <node>]\n"
 "    Create a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume rename [<remote>:]<pool> <old name> <new name>\n"
+"lxc storage volume rename [<remote>:]<pool> <old name> <new name> [--target "
+"<node>]\n"
 "    Rename a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume get [<remote>:]<pool> <volume> <key>\n"
+"lxc storage volume get [<remote>:]<pool> <volume> <key> [--target <node>]\n"
 "    Get storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume set [<remote>:]<pool> <volume> <key> <value>\n"
+"lxc storage volume set [<remote>:]<pool> <volume> <key> <value> [--target "
+"<node>]\n"
 "    Set storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume unset [<remote>:]<pool> <volume> <key>\n"
+"lxc storage volume unset [<remote>:]<pool> <volume> <key> [--target <node>]\n"
 "    Unset storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume delete [<remote>:]<pool> <volume>\n"
+"lxc storage volume delete [<remote>:]<pool> <volume> [--target <node>]\n"
 "    Delete a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume edit [<remote>:]<pool> <volume>\n"
+"lxc storage volume edit [<remote>:]<pool> <volume> [--target <node>]\n"
 "    Edit storage volume, either by launching external editor or reading "
 "STDIN.\n"
 "\n"
@@ -2175,7 +2247,7 @@ msgstr ""
 msgid "User aborted delete operation."
 msgstr ""
 
-#: lxc/utils.go:371
+#: lxc/utils.go:372
 msgid ""
 "User signaled us three times, exiting. The remote operation will keep "
 "running."
@@ -2195,7 +2267,7 @@ msgstr ""
 msgid "Whether or not to snapshot the container's running state"
 msgstr ""
 
-#: lxc/network.go:510 lxc/operation.go:141 lxc/remote.go:382 lxc/remote.go:387
+#: lxc/network.go:528 lxc/operation.go:141 lxc/remote.go:382 lxc/remote.go:387
 msgid "YES"
 msgstr ""
 
@@ -2227,11 +2299,11 @@ msgstr ""
 msgid "default"
 msgstr ""
 
-#: lxc/storage.go:804
+#: lxc/storage.go:835
 msgid "description"
 msgstr ""
 
-#: lxc/init.go:311
+#: lxc/init.go:314
 msgid "didn't get any affected image, container or snapshot from server"
 msgstr ""
 
@@ -2239,7 +2311,7 @@ msgstr ""
 msgid "disabled"
 msgstr ""
 
-#: lxc/storage.go:803
+#: lxc/storage.go:834
 msgid "driver"
 msgstr ""
 
@@ -2257,11 +2329,11 @@ msgstr ""
 msgid "error: unknown command: %s"
 msgstr ""
 
-#: lxc/storage.go:801
+#: lxc/storage.go:832
 msgid "info"
 msgstr ""
 
-#: lxc/storage.go:802
+#: lxc/storage.go:833
 msgid "name"
 msgstr ""
 
@@ -2273,7 +2345,7 @@ msgstr "no"
 msgid "ok (y/n)?"
 msgstr "ok (y/n)?"
 
-#: lxc/main.go:366 lxc/main.go:370
+#: lxc/main.go:367 lxc/main.go:371
 #, c-format
 msgid "processing aliases failed %s\n"
 msgstr "errore di processamento degli alias %s\n"
@@ -2302,32 +2374,32 @@ msgstr "il remote %s esiste come %s"
 msgid "remote %s is static and cannot be modified"
 msgstr "il remote %s è statico e non può essere modificato"
 
-#: lxc/storage.go:806
+#: lxc/storage.go:837
 msgid "space used"
 msgstr ""
 
-#: lxc/info.go:248
+#: lxc/info.go:251
 msgid "stateful"
 msgstr ""
 
-#: lxc/info.go:250
+#: lxc/info.go:253
 msgid "stateless"
 msgstr "senza stato"
 
-#: lxc/info.go:244
+#: lxc/info.go:247
 #, c-format
 msgid "taken at %s"
 msgstr "salvato alle %s"
 
-#: lxc/storage.go:805
+#: lxc/storage.go:836
 msgid "total space"
 msgstr ""
 
-#: lxc/storage.go:800
+#: lxc/storage.go:831
 msgid "used by"
 msgstr ""
 
-#: lxc/main.go:297
+#: lxc/main.go:298
 msgid "wrong number of subcommand arguments"
 msgstr "numero errato di argomenti del sottocomando"
 
diff --git a/po/ja.po b/po/ja.po
index 9e636b5a6..4707aa4d2 100644
--- a/po/ja.po
+++ b/po/ja.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: LXD\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2018-01-21 21:48+0000\n"
+"POT-Creation-Date: 2018-01-24 13:34+0000\n"
 "PO-Revision-Date: 2018-01-02 10:52+0000\n"
 "Last-Translator: KATOH Yasufumi <karma at jazz.email.ne.jp>\n"
 "Language-Team: Japanese <https://hosted.weblate.org/projects/linux-"
@@ -19,7 +19,7 @@ msgstr ""
 "Plural-Forms: nplurals=1; plural=0;\n"
 "X-Generator: Weblate 2.19-dev\n"
 
-#: lxc/storage.go:34
+#: lxc/storage.go:35
 msgid ""
 "### This is a yaml representation of a storage pool.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -36,7 +36,7 @@ msgid ""
 "###   zfs.pool_name: default"
 msgstr ""
 
-#: lxc/storage.go:51
+#: lxc/storage.go:52
 msgid ""
 "### This is a yaml representation of a storage volume.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -105,7 +105,7 @@ msgid ""
 "###  description: My custom image"
 msgstr ""
 
-#: lxc/network.go:30
+#: lxc/network.go:32
 msgid ""
 "### This is a yaml representation of the network.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -156,7 +156,7 @@ msgstr "%s (他%d個)"
 msgid "%s is not a directory"
 msgstr "%s はディレクトリではありません"
 
-#: lxc/utils.go:375
+#: lxc/utils.go:376
 #, c-format
 msgid "%v (interrupt two more times to force)"
 msgstr ""
@@ -183,7 +183,7 @@ msgstr ""
 msgid "ARCH"
 msgstr ""
 
-#: lxc/list.go:463
+#: lxc/list.go:467
 msgid "ARCHITECTURE"
 msgstr ""
 
@@ -208,7 +208,7 @@ msgstr "%s の管理者パスワード: "
 msgid "Aliases:"
 msgstr "エイリアス:"
 
-#: lxc/image.go:600 lxc/info.go:122
+#: lxc/image.go:600 lxc/info.go:125
 #, c-format
 msgid "Architecture: %s"
 msgstr "アーキテクチャ: %s"
@@ -232,11 +232,11 @@ msgstr "不正なイメージプロパティ形式: %s"
 msgid "Both --all and container name given"
 msgstr "--all とコンテナ名を両方同時に指定することはできません"
 
-#: lxc/info.go:215
+#: lxc/info.go:218
 msgid "Bytes received"
 msgstr "受信バイト数"
 
-#: lxc/info.go:216
+#: lxc/info.go:219
 msgid "Bytes sent"
 msgstr "送信バイト数"
 
@@ -248,11 +248,11 @@ msgstr ""
 msgid "COMMON NAME"
 msgstr ""
 
-#: lxc/info.go:179
+#: lxc/info.go:182
 msgid "CPU usage (in seconds)"
 msgstr "CPU使用量(秒)"
 
-#: lxc/info.go:183
+#: lxc/info.go:186
 msgid "CPU usage:"
 msgstr "CPU使用量:"
 
@@ -260,7 +260,7 @@ msgstr "CPU使用量:"
 msgid "CREATED"
 msgstr ""
 
-#: lxc/list.go:464
+#: lxc/list.go:468
 msgid "CREATED AT"
 msgstr ""
 
@@ -274,7 +274,7 @@ msgid "Can't pull a directory without --recursive"
 msgstr ""
 "ディレクトリを pull する場合は --recursive オプションを使用してください"
 
-#: lxc/config.go:207 lxc/network.go:560
+#: lxc/config.go:207 lxc/network.go:587
 #, c-format
 msgid "Can't read from stdin: %s"
 msgstr "標準入力から読み込めません: %s"
@@ -293,7 +293,7 @@ msgstr "キー '%s' が設定されていないので削除できません"
 msgid "Can't unset key '%s', it's not currently set."
 msgstr "キー '%s' が指定されていないので削除できません。"
 
-#: lxc/profile.go:546 lxc/storage.go:663
+#: lxc/profile.go:546 lxc/storage.go:675
 msgid "Cannot provide container name to list"
 msgstr "コンテナ名を取得できません"
 
@@ -306,7 +306,7 @@ msgstr "証明書のフィンガープリント: %s"
 msgid "Client certificate stored at server: "
 msgstr "クライアント証明書がサーバに格納されました: "
 
-#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:130 lxc/list.go:131
+#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:134 lxc/list.go:135
 msgid "Columns"
 msgstr "カラムレイアウト"
 
@@ -314,12 +314,12 @@ msgstr "カラムレイアウト"
 msgid "Commands:"
 msgstr "コマンド:"
 
-#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:137 lxc/init.go:138
+#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:138 lxc/init.go:139
 msgid "Config key/value to apply to the new container"
 msgstr "新しいコンテナに適用するキー/値の設定"
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:614 lxc/storage.go:1067
+#: lxc/network.go:440 lxc/profile.go:275 lxc/storage.go:626 lxc/storage.go:1138
 #, c-format
 msgid "Config parsing error: %s"
 msgstr "設定の構文エラー: %s"
@@ -336,7 +336,7 @@ msgstr ""
 msgid "Container name is mandatory"
 msgstr "コンテナ名を指定する必要があります"
 
-#: lxc/copy.go:229 lxc/init.go:317
+#: lxc/copy.go:229 lxc/init.go:320
 #, c-format
 msgid "Container name is: %s"
 msgstr "コンテナ名: %s"
@@ -371,26 +371,30 @@ msgstr "サーバ証明書格納用のディレクトリを作成できません
 msgid "Create any directories necessary"
 msgstr "必要なディレクトリをすべて作成します"
 
-#: lxc/image.go:605 lxc/info.go:124
+#: lxc/image.go:605 lxc/info.go:127
 #, c-format
 msgid "Created: %s"
 msgstr "作成日時: %s"
 
-#: lxc/init.go:196
+#: lxc/init.go:199
 #, c-format
 msgid "Creating %s"
 msgstr "%s を作成中"
 
-#: lxc/init.go:194
+#: lxc/init.go:197
 msgid "Creating the container"
 msgstr "コンテナを作成中"
 
-#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:465 lxc/network.go:525
-#: lxc/storage.go:692 lxc/storage.go:881
+#: lxc/cluster.go:184
+msgid "DATABASE"
+msgstr ""
+
+#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:469 lxc/network.go:547
+#: lxc/storage.go:711 lxc/storage.go:916
 msgid "DESCRIPTION"
 msgstr ""
 
-#: lxc/storage.go:693
+#: lxc/storage.go:712
 msgid "DRIVER"
 msgstr ""
 
@@ -408,7 +412,7 @@ msgstr "デバイス %s が %s に追加されました"
 msgid "Device %s removed from %s"
 msgstr "デバイス %s が %s から削除されました"
 
-#: lxc/utils.go:307 lxc/utils.go:331
+#: lxc/utils.go:308 lxc/utils.go:332
 #, c-format
 msgid "Device already exists: %s"
 msgstr "デバイスは既に存在します: %s"
@@ -425,11 +429,11 @@ msgstr "擬似端末の割り当てを無効にします"
 msgid "Disable stdin (reads from /dev/null)"
 msgstr "標準入力を無効にします (/dev/null から読み込みます)"
 
-#: lxc/info.go:172
+#: lxc/info.go:175
 msgid "Disk usage:"
 msgstr "ディスク使用量:"
 
-#: lxc/list.go:622
+#: lxc/list.go:638
 msgid "EPHEMERAL"
 msgstr ""
 
@@ -453,7 +457,7 @@ msgstr "環境変数を設定します (例: HOME=/home/foo)"
 msgid "Environment:"
 msgstr "環境変数:"
 
-#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:141 lxc/init.go:142
+#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:142 lxc/init.go:143
 msgid "Ephemeral container"
 msgstr "Ephemeral コンテナ"
 
@@ -488,7 +492,7 @@ msgstr ""
 msgid "FINGERPRINT"
 msgstr ""
 
-#: lxc/utils.go:413
+#: lxc/utils.go:414
 #, c-format
 msgid "Failed to create alias %s"
 msgstr "エイリアス %s の作成に失敗しました"
@@ -507,7 +511,7 @@ msgstr "'lxc.1' の生成が失敗しました: %v"
 msgid "Failed to get the new container name"
 msgstr "新しいコンテナ名が取得できません"
 
-#: lxc/utils.go:403
+#: lxc/utils.go:404
 #, c-format
 msgid "Failed to remove alias %s"
 msgstr "エイリアス %s の削除に失敗しました"
@@ -517,11 +521,11 @@ msgstr "エイリアス %s の削除に失敗しました"
 msgid "Failed to walk path for %s: %s"
 msgstr "パス %s にアクセスできませんでした: %s"
 
-#: lxc/list.go:133
+#: lxc/list.go:137
 msgid "Fast mode (same as --columns=nsacPt)"
 msgstr "Fast モード (--columns=nsacPt と同じ)"
 
-#: lxc/network.go:486 lxc/operation.go:121
+#: lxc/network.go:504 lxc/operation.go:121
 msgid "Filtering isn't supported yet"
 msgstr "情報表示のフィルタリングはまだサポートされていません"
 
@@ -534,6 +538,10 @@ msgstr "証明書のフィンガープリント: %s"
 msgid "Force pseudo-terminal allocation"
 msgstr "強制的に擬似端末を割り当てます"
 
+#: lxc/cluster.go:41
+msgid "Force removing a node, even if degraded"
+msgstr ""
+
 #: lxc/action.go:47 lxc/action.go:48
 msgid "Force the container to shutdown"
 msgstr "コンテナを強制シャットダウンします"
@@ -546,7 +554,7 @@ msgstr "稼働中のコンテナを強制的に削除します"
 msgid "Force using the local unix socket"
 msgstr "強制的にローカルのUNIXソケットを使います"
 
-#: lxc/image.go:180 lxc/list.go:132
+#: lxc/image.go:180 lxc/list.go:136
 msgid "Format (csv|json|table|yaml)"
 msgstr "フォーマット (csv|json|table|yaml)"
 
@@ -558,11 +566,11 @@ msgstr "クライアント証明書を生成します。1分ぐらいかかり
 msgid "ID"
 msgstr "ID"
 
-#: lxc/list.go:461
+#: lxc/list.go:465
 msgid "IPV4"
 msgstr "IPV4"
 
-#: lxc/list.go:462
+#: lxc/list.go:466
 msgid "IPV6"
 msgstr "IPV6"
 
@@ -610,7 +618,7 @@ msgstr "イメージの更新が成功しました!"
 msgid "Input data"
 msgstr "入力するデータ"
 
-#: lxc/init.go:147
+#: lxc/init.go:148
 msgid "Instance type"
 msgstr "インスタンスタイプ"
 
@@ -647,7 +655,7 @@ msgstr "不正なソース %s"
 msgid "Invalid target %s"
 msgstr "不正な送り先 %s"
 
-#: lxc/info.go:153
+#: lxc/info.go:156
 msgid "Ips:"
 msgstr "IPアドレス:"
 
@@ -655,7 +663,7 @@ msgstr "IPアドレス:"
 msgid "Keep the image up to date after initial copy"
 msgstr "最初にコピーした後も常にイメージを最新の状態に保つ"
 
-#: lxc/list.go:466
+#: lxc/list.go:470
 msgid "LAST USED AT"
 msgstr ""
 
@@ -672,14 +680,18 @@ msgstr "最終使用: %s"
 msgid "Last used: never"
 msgstr "最終使用: 未使用"
 
-#: lxc/info.go:268
+#: lxc/info.go:271
 msgid "Log:"
 msgstr "ログ:"
 
-#: lxc/network.go:524
+#: lxc/network.go:546
 msgid "MANAGED"
 msgstr ""
 
+#: lxc/cluster.go:186
+msgid "MESSAGE"
+msgstr ""
+
 #: lxc/image.go:176
 msgid "Make image public"
 msgstr "イメージを public にする"
@@ -688,23 +700,23 @@ msgstr "イメージを public にする"
 msgid "Make the image public"
 msgstr "イメージを public にする"
 
-#: lxc/info.go:190
+#: lxc/info.go:193
 msgid "Memory (current)"
 msgstr "メモリ (現在値)"
 
-#: lxc/info.go:194
+#: lxc/info.go:197
 msgid "Memory (peak)"
 msgstr "メモリ (ピーク)"
 
-#: lxc/info.go:206
+#: lxc/info.go:209
 msgid "Memory usage:"
 msgstr "メモリ消費量:"
 
-#: lxc/utils.go:258
+#: lxc/utils.go:259
 msgid "Missing summary."
 msgstr "サマリーはありません。"
 
-#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:412 lxc/storage.go:532
+#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:414 lxc/storage.go:544
 msgid "More than one device matches, specify the device name."
 msgstr "複数のデバイスとマッチします。デバイス名を指定してください。"
 
@@ -726,40 +738,49 @@ msgstr "ディレクトリからのインポートは root で実行する必要
 msgid "Must supply container name for: "
 msgstr "コンテナ名を指定する必要があります: "
 
-#: lxc/list.go:467 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409
-#: lxc/storage.go:691 lxc/storage.go:880
+#: lxc/cluster.go:182 lxc/list.go:471 lxc/network.go:544 lxc/profile.go:573
+#: lxc/remote.go:409 lxc/storage.go:710 lxc/storage.go:915
 msgid "NAME"
 msgstr ""
 
-#: lxc/network.go:508 lxc/operation.go:139 lxc/remote.go:380 lxc/remote.go:385
+#: lxc/network.go:526 lxc/operation.go:139 lxc/remote.go:380 lxc/remote.go:385
 msgid "NO"
 msgstr ""
 
+#: lxc/list.go:492 lxc/storage.go:920
+msgid "NODE"
+msgstr ""
+
 #: lxc/info.go:117
 #, c-format
 msgid "Name: %s"
 msgstr "コンテナ名: %s"
 
-#: lxc/network.go:258
+#: lxc/network.go:271
 #, c-format
 msgid "Network %s created"
 msgstr "ネットワーク %s を作成しました"
 
-#: lxc/network.go:374
+#: lxc/network.go:388
 #, c-format
 msgid "Network %s deleted"
 msgstr "ネットワーク %s を削除しました"
 
-#: lxc/network.go:451
+#: lxc/network.go:269
+#, fuzzy, c-format
+msgid "Network %s pending on node %s"
+msgstr "ネットワーク名 %s を %s に変更しました"
+
+#: lxc/network.go:465
 #, c-format
 msgid "Network %s renamed to %s"
 msgstr "ネットワーク名 %s を %s に変更しました"
 
-#: lxc/init.go:143 lxc/init.go:144
+#: lxc/init.go:144 lxc/init.go:145
 msgid "Network name"
 msgstr "ネットワーク名:"
 
-#: lxc/info.go:223
+#: lxc/info.go:226
 msgid "Network usage:"
 msgstr "ネットワーク使用状況:"
 
@@ -771,11 +792,11 @@ msgstr "新しいエイリアスを定義する"
 msgid "No certificate provided to add"
 msgstr "追加すべき証明書が提供されていません"
 
-#: lxc/network.go:293 lxc/network.go:346
+#: lxc/network.go:307 lxc/network.go:360
 msgid "No device found for this network"
 msgstr "このネットワークに対するデバイスがありません"
 
-#: lxc/storage.go:421 lxc/storage.go:541
+#: lxc/storage.go:423 lxc/storage.go:553
 msgid "No device found for this storage volume."
 msgstr "このストレージボリュームに対するデバイスがありません。"
 
@@ -783,7 +804,27 @@ msgstr "このストレージボリュームに対するデバイスがありま
 msgid "No fingerprint specified."
 msgstr "フィンガープリントが指定されていません。"
 
-#: lxc/storage.go:365 lxc/storage.go:458
+#: lxc/cluster.go:142
+#, fuzzy, c-format
+msgid "Node %s removed"
+msgstr "プロファイル %s が %s から削除されました"
+
+#: lxc/cluster.go:118
+#, fuzzy, c-format
+msgid "Node %s renamed to %s"
+msgstr "プロファイル名 %s を %s に変更しました"
+
+#: lxc/init.go:149 lxc/network.go:102 lxc/storage.go:158
+#, fuzzy
+msgid "Node name"
+msgstr "ネットワーク名:"
+
+#: lxc/info.go:119
+#, fuzzy, c-format
+msgid "Node: %s"
+msgstr "コンテナ名: %s"
+
+#: lxc/storage.go:367 lxc/storage.go:460
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr "\"カスタム\" のボリュームのみがコンテナにアタッチできます。"
 
@@ -795,7 +836,7 @@ msgstr "simplestreams は https の URL のみサポートします"
 msgid "Only https:// is supported for remote image import."
 msgstr "リモートイメージのインポートは https:// のみをサポートします。"
 
-#: lxc/network.go:402 lxc/network.go:546
+#: lxc/network.go:416 lxc/network.go:573
 msgid "Only managed networks can be modified."
 msgstr "管理対象のネットワークのみ変更できます。"
 
@@ -812,19 +853,19 @@ msgstr "オプション:"
 msgid "Override the terminal mode (auto, interactive or non-interactive)"
 msgstr "ターミナルモードを上書きします (auto, interactive, non-interactive)"
 
-#: lxc/list.go:624
+#: lxc/list.go:640
 msgid "PERSISTENT"
 msgstr ""
 
-#: lxc/list.go:469
+#: lxc/list.go:473
 msgid "PID"
 msgstr "PID"
 
-#: lxc/list.go:468
+#: lxc/list.go:472
 msgid "PROCESSES"
 msgstr ""
 
-#: lxc/list.go:470
+#: lxc/list.go:474
 msgid "PROFILES"
 msgstr ""
 
@@ -836,11 +877,11 @@ msgstr ""
 msgid "PUBLIC"
 msgstr ""
 
-#: lxc/info.go:217
+#: lxc/info.go:220
 msgid "Packets received"
 msgstr "受信パケット"
 
-#: lxc/info.go:218
+#: lxc/info.go:221
 msgid "Packets sent"
 msgstr "送信パケット"
 
@@ -852,7 +893,7 @@ msgstr "別のクライアント用設定ディレクトリ"
 msgid "Path to an alternate server directory"
 msgstr "別のサーバ用設定ディレクトリ"
 
-#: lxc/main.go:238
+#: lxc/main.go:239
 msgid "Pause containers."
 msgstr "コンテナを一時停止します。"
 
@@ -860,12 +901,12 @@ msgstr "コンテナを一時停止します。"
 msgid "Permission denied, are you in the lxd group?"
 msgstr "アクセスが拒否されました。lxd グループに所属していますか?"
 
-#: lxc/info.go:135
+#: lxc/info.go:138
 #, c-format
 msgid "Pid: %d"
 msgstr "Pid: %d"
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:615 lxc/storage.go:1068
+#: lxc/network.go:441 lxc/profile.go:276 lxc/storage.go:627 lxc/storage.go:1139
 msgid "Press enter to open the editor again"
 msgstr "再度エディタを開くためには Enter キーを押します"
 
@@ -890,7 +931,7 @@ msgstr "レスポンスをそのまま表示します"
 msgid "Print verbose information"
 msgstr "詳細情報を表示します"
 
-#: lxc/info.go:159
+#: lxc/info.go:162
 #, c-format
 msgid "Processes: %d"
 msgstr "プロセス数: %d"
@@ -920,7 +961,7 @@ msgstr "プロファイル %s が %s から削除されました"
 msgid "Profile %s renamed to %s"
 msgstr "プロファイル名 %s を %s に変更しました"
 
-#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:139 lxc/init.go:140
+#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:140 lxc/init.go:141
 msgid "Profile to apply to the new container"
 msgstr "新しいコンテナに適用するプロファイル"
 
@@ -929,7 +970,7 @@ msgstr "新しいコンテナに適用するプロファイル"
 msgid "Profiles %s applied to %s"
 msgstr "プロファイル %s が %s に追加されました"
 
-#: lxc/info.go:133
+#: lxc/info.go:136
 #, c-format
 msgid "Profiles: %s"
 msgstr "プロファイル: %s"
@@ -960,11 +1001,11 @@ msgstr "イメージの更新中: %s"
 msgid "Remote admin password"
 msgstr "リモートの管理者パスワード"
 
-#: lxc/utils.go:366
+#: lxc/utils.go:367
 msgid "Remote operation canceled by user"
 msgstr "リモート操作がユーザによってキャンセルされました"
 
-#: lxc/info.go:119
+#: lxc/info.go:122
 #, c-format
 msgid "Remote: %s"
 msgstr "リモート名: %s"
@@ -974,7 +1015,7 @@ msgstr "リモート名: %s"
 msgid "Remove %s (yes/no): "
 msgstr "%s を消去しますか (yes/no): "
 
-#: lxc/storage.go:1099
+#: lxc/storage.go:1176
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr "ストレージボリューム名 \"%s\" を \"%s\" に変更しました"
@@ -983,11 +1024,11 @@ msgstr "ストレージボリューム名 \"%s\" を \"%s\" に変更しまし
 msgid "Require user confirmation"
 msgstr "ユーザの確認を要求する"
 
-#: lxc/info.go:156
+#: lxc/info.go:159
 msgid "Resources:"
 msgstr "リソース:"
 
-#: lxc/main.go:246
+#: lxc/main.go:247
 msgid "Restart containers."
 msgstr "コンテナを再起動します。"
 
@@ -995,7 +1036,7 @@ msgstr "コンテナを再起動します。"
 msgid "Retrieve the container's console log"
 msgstr "コンテナのコンソールログを取得します"
 
-#: lxc/init.go:289
+#: lxc/init.go:292
 #, c-format
 msgid "Retrieving image: %s"
 msgstr "イメージの取得中: %s"
@@ -1008,15 +1049,15 @@ msgstr "すべてのコンテナに対してコマンドを実行します"
 msgid "SIZE"
 msgstr ""
 
-#: lxc/list.go:471
+#: lxc/list.go:475
 msgid "SNAPSHOTS"
 msgstr ""
 
-#: lxc/storage.go:694
+#: lxc/storage.go:717
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/list.go:472
+#: lxc/cluster.go:185 lxc/list.go:476 lxc/network.go:551 lxc/storage.go:715
 msgid "STATE"
 msgstr ""
 
@@ -1028,7 +1069,7 @@ msgstr ""
 msgid "STATUS"
 msgstr ""
 
-#: lxc/list.go:474
+#: lxc/list.go:478
 msgid "STORAGE POOL"
 msgstr ""
 
@@ -1080,11 +1121,11 @@ msgstr "拡張した設定を表示する"
 msgid "Show the resources available to the server"
 msgstr "サーバで使用可能なリソースを表示します"
 
-#: lxc/storage.go:155
+#: lxc/storage.go:156
 msgid "Show the resources available to the storage pool"
 msgstr "ストレージプールで利用可能なリソースを表示します"
 
-#: lxc/storage.go:156
+#: lxc/storage.go:157
 msgid "Show the used and free space in bytes"
 msgstr ""
 
@@ -1093,7 +1134,7 @@ msgstr ""
 msgid "Size: %.2fMB"
 msgstr "サイズ: %.2fMB"
 
-#: lxc/info.go:237
+#: lxc/info.go:240
 msgid "Snapshots:"
 msgstr "スナップショット:"
 
@@ -1106,7 +1147,7 @@ msgstr "一部のコンテナで %s が失敗しました"
 msgid "Source:"
 msgstr "取得元:"
 
-#: lxc/main.go:256
+#: lxc/main.go:257
 msgid "Start containers."
 msgstr "コンテナを起動します。"
 
@@ -1115,12 +1156,12 @@ msgstr "コンテナを起動します。"
 msgid "Starting %s"
 msgstr "%s を起動中"
 
-#: lxc/info.go:127
+#: lxc/info.go:130
 #, c-format
 msgid "Status: %s"
 msgstr "状態: %s"
 
-#: lxc/main.go:262
+#: lxc/main.go:263
 msgid "Stop containers."
 msgstr "コンテナを停止します。"
 
@@ -1137,26 +1178,31 @@ msgstr "コンテナの停止に失敗しました!"
 msgid "Stopping the container failed: %s"
 msgstr "コンテナの停止に失敗しました: %s"
 
-#: lxc/storage.go:506
+#: lxc/storage.go:517
 #, c-format
 msgid "Storage pool %s created"
 msgstr "ストレージプール %s を作成しました"
 
-#: lxc/storage.go:565
+#: lxc/storage.go:577
 #, c-format
 msgid "Storage pool %s deleted"
 msgstr "ストレージプール %s を削除しました"
 
-#: lxc/init.go:145 lxc/init.go:146
+#: lxc/storage.go:515
+#, fuzzy, c-format
+msgid "Storage pool %s pending on node %s"
+msgstr "ストレージプール %s を作成しました"
+
+#: lxc/init.go:146 lxc/init.go:147
 msgid "Storage pool name"
 msgstr "ストレージプール名"
 
-#: lxc/storage.go:914
+#: lxc/storage.go:959
 #, c-format
 msgid "Storage volume %s created"
 msgstr "ストレージボリューム %s を作成しました"
 
-#: lxc/storage.go:929
+#: lxc/storage.go:979
 #, c-format
 msgid "Storage volume %s deleted"
 msgstr "ストレージボリューム %s を削除しました"
@@ -1165,15 +1211,15 @@ msgstr "ストレージボリューム %s を削除しました"
 msgid "Store the container state (only for stop)"
 msgstr "コンテナの状態を保存します (stopのみ)"
 
-#: lxc/info.go:198
+#: lxc/info.go:201
 msgid "Swap (current)"
 msgstr "Swap (現在値)"
 
-#: lxc/info.go:202
+#: lxc/info.go:205
 msgid "Swap (peak)"
 msgstr "Swap (ピーク)"
 
-#: lxc/list.go:473 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:879
+#: lxc/list.go:477 lxc/network.go:545 lxc/operation.go:153 lxc/storage.go:914
 msgid "TYPE"
 msgstr ""
 
@@ -1189,7 +1235,7 @@ msgstr ""
 "コンテナは現在実行中です。停止して、再起動するために --force を使用してくだ\n"
 "さい。"
 
-#: lxc/init.go:362
+#: lxc/init.go:365
 msgid "The container you are starting doesn't have any network attached to it."
 msgstr "起動しようとしたコンテナに接続されているネットワークがありません。"
 
@@ -1202,7 +1248,7 @@ msgstr "デバイスはすでに存在します"
 msgid "The device doesn't exist"
 msgstr "デバイスが存在しません"
 
-#: lxc/init.go:346
+#: lxc/init.go:349
 #, c-format
 msgid "The local image '%s' couldn't be found, trying '%s:' instead."
 msgstr ""
@@ -1212,11 +1258,11 @@ msgstr ""
 msgid "The opposite of \"lxc pause\" is \"lxc start\"."
 msgstr "\"lxc pause\" の反対のコマンドは \"lxc start\" です。"
 
-#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:426 lxc/storage.go:546
+#: lxc/network.go:312 lxc/network.go:365 lxc/storage.go:428 lxc/storage.go:558
 msgid "The specified device doesn't exist"
 msgstr "指定したデバイスが存在しません"
 
-#: lxc/network.go:302 lxc/network.go:355
+#: lxc/network.go:316 lxc/network.go:369
 msgid "The specified device doesn't match the network"
 msgstr "指定したデバイスはネットワークとマッチしません"
 
@@ -1246,12 +1292,12 @@ msgstr "コンテナを強制停止するまでの時間"
 msgid "Timestamps:"
 msgstr "タイムスタンプ:"
 
-#: lxc/init.go:364
+#: lxc/init.go:367
 msgid "To attach a network to a container, use: lxc network attach"
 msgstr ""
 "コンテナにネットワークを接続するには、lxc network attach を使用してください"
 
-#: lxc/init.go:363
+#: lxc/init.go:366
 msgid "To create a new network, use: lxc network create"
 msgstr ""
 "新しいネットワークを作成するには、lxc network create を使用してください"
@@ -1285,11 +1331,11 @@ msgstr "イメージを転送中: %s"
 msgid "Try `lxc info --show-log %s` for more info"
 msgstr "更に情報を得るために `lxc info --show-log %s` を実行してみてください"
 
-#: lxc/info.go:129
+#: lxc/info.go:132
 msgid "Type: ephemeral"
 msgstr "タイプ: ephemeral"
 
-#: lxc/info.go:131
+#: lxc/info.go:134
 msgid "Type: persistent"
 msgstr "タイプ: persistent"
 
@@ -1297,11 +1343,11 @@ msgstr "タイプ: persistent"
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/remote.go:410
+#: lxc/cluster.go:183 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:695 lxc/storage.go:882
+#: lxc/network.go:548 lxc/profile.go:574 lxc/storage.go:719 lxc/storage.go:917
 msgid "USED BY"
 msgstr ""
 
@@ -1334,6 +1380,25 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr "使い方: lxc <コマンド> [オプション]"
 
+#: lxc/cluster.go:22
+msgid ""
+"Usage: lxc cluster <subcommand> [options]\n"
+"\n"
+"Manage cluster nodes.\n"
+"\n"
+"lxc cluster list [<remote>:]\n"
+"    List all nodes in the cluster.\n"
+"\n"
+"lxc cluster show [<remote>:]<node>\n"
+"    Show details of a node.\n"
+"\n"
+"lxc cluster rename [<remote>:]<node> <new-name>\n"
+"    Rename a cluster node.\n"
+"\n"
+"lxc cluster delete [<remote>:]<node> [--force]\n"
+"    Delete a node from the cluster."
+msgstr ""
+
 #: lxc/config.go:85
 msgid ""
 "Usage: lxc config <subcommand> [options]\n"
@@ -1924,11 +1989,13 @@ msgstr ""
 "lxc info [<remote>:] [--resources]\n"
 "    LXD サーバの情報を表示します。"
 
-#: lxc/init.go:77
+#: lxc/init.go:78
+#, fuzzy
 msgid ""
 "Usage: lxc init [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create containers from images.\n"
 "\n"
@@ -1951,10 +2018,12 @@ msgstr ""
 "    lxc init ubuntu:16.04 u1"
 
 #: lxc/launch.go:20
+#, fuzzy
 msgid ""
 "Usage: lxc launch [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create and start containers from images.\n"
 "\n"
@@ -2047,6 +2116,8 @@ msgid ""
 "\n"
 "\tt - Type (persistent or ephemeral)\n"
 "\n"
+"\tH - Node hosting the container\n"
+"\n"
 "Custom columns are defined with \"key[:name][:maxWidth]\":\n"
 "\n"
 "\tKEY: The (extended) config key to display\n"
@@ -2228,7 +2299,8 @@ msgstr ""
 "lxc move <container>/<old snapshot name> <container>/<new snapshot name>\n"
 "    スナップショットをリネームします。"
 
-#: lxc/network.go:50
+#: lxc/network.go:52
+#, fuzzy
 msgid ""
 "Usage: lxc network <subcommand> [options]\n"
 "\n"
@@ -2237,13 +2309,13 @@ msgid ""
 "lxc network list [<remote>:]\n"
 "    List available networks.\n"
 "\n"
-"lxc network show [<remote>:]<network>\n"
+"lxc network show [<remote>:]<network> [--target <node>]\n"
 "    Show details of a network.\n"
 "\n"
-"lxc network create [<remote>:]<network> [key=value...]\n"
+"lxc network create [<remote>:]<network> [key=value...] [--target <node>]\n"
 "    Create a network.\n"
 "\n"
-"lxc network get [<remote>:]<network> <key>\n"
+"lxc network get [<remote>:]<network> <key> [--target <node>]\n"
 "    Get network configuration.\n"
 "\n"
 "lxc network set [<remote>:]<network> <key> <value>\n"
@@ -2689,7 +2761,7 @@ msgstr ""
 "lxc snapshot u1 snap0\n"
 "    \"u1\" のスナップショットを \"snap0\" という名前で作成します。"
 
-#: lxc/storage.go:65
+#: lxc/storage.go:66
 #, fuzzy
 msgid ""
 "Usage: lxc storage <subcommand> [options]\n"
@@ -2700,13 +2772,14 @@ msgid ""
 "lxc storage list [<remote>:]\n"
 "    List available storage pools.\n"
 "\n"
-"lxc storage show [<remote>:]<pool> [--resources]\n"
+"lxc storage show [<remote>:]<pool> [--resources] [--target <node>]\n"
 "    Show details of a storage pool.\n"
 "\n"
 "lxc storage info [<remote>:]<pool> [--bytes]\n"
 "    Show information of a storage pool in yaml format.\n"
 "\n"
-"lxc storage create [<remote>:]<pool> <driver> [key=value]...\n"
+"lxc storage create [<remote>:]<pool> <driver> [key=value]... [--target "
+"<node>]\n"
 "    Create a storage pool.\n"
 "\n"
 "lxc storage get [<remote>:]<pool> <key>\n"
@@ -2729,28 +2802,31 @@ msgid ""
 "lxc storage volume list [<remote>:]<pool>\n"
 "    List available storage volumes on a storage pool.\n"
 "\n"
-"lxc storage volume show [<remote>:]<pool> <volume>\n"
-"    Show details of a storage volume on a storage pool.\n"
+"lxc storage volume show [<remote>:]<pool> <volume> [--target <node>]\n"
+"   Show details of a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume create [<remote>:]<pool> <volume> [key=value]...\n"
+"lxc storage volume create [<remote>:]<pool> <volume> [key=value]... [--"
+"target <node>]\n"
 "    Create a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume rename [<remote>:]<pool> <old name> <new name>\n"
+"lxc storage volume rename [<remote>:]<pool> <old name> <new name> [--target "
+"<node>]\n"
 "    Rename a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume get [<remote>:]<pool> <volume> <key>\n"
+"lxc storage volume get [<remote>:]<pool> <volume> <key> [--target <node>]\n"
 "    Get storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume set [<remote>:]<pool> <volume> <key> <value>\n"
+"lxc storage volume set [<remote>:]<pool> <volume> <key> <value> [--target "
+"<node>]\n"
 "    Set storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume unset [<remote>:]<pool> <volume> <key>\n"
+"lxc storage volume unset [<remote>:]<pool> <volume> <key> [--target <node>]\n"
 "    Unset storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume delete [<remote>:]<pool> <volume>\n"
+"lxc storage volume delete [<remote>:]<pool> <volume> [--target <node>]\n"
 "    Delete a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume edit [<remote>:]<pool> <volume>\n"
+"lxc storage volume edit [<remote>:]<pool> <volume> [--target <node>]\n"
 "    Edit storage volume, either by launching external editor or reading "
 "STDIN.\n"
 "\n"
@@ -2895,7 +2971,7 @@ msgstr ""
 msgid "User aborted delete operation."
 msgstr "ユーザが削除操作を中断しました。"
 
-#: lxc/utils.go:371
+#: lxc/utils.go:372
 msgid ""
 "User signaled us three times, exiting. The remote operation will keep "
 "running."
@@ -2918,7 +2994,7 @@ msgstr ""
 msgid "Whether or not to snapshot the container's running state"
 msgstr "コンテナの稼動状態のスナップショットを取得するかどうか"
 
-#: lxc/network.go:510 lxc/operation.go:141 lxc/remote.go:382 lxc/remote.go:387
+#: lxc/network.go:528 lxc/operation.go:141 lxc/remote.go:382 lxc/remote.go:387
 msgid "YES"
 msgstr ""
 
@@ -2951,11 +3027,11 @@ msgstr "再帰 (recursive) モードでは uid/gid/mode を指定できません
 msgid "default"
 msgstr ""
 
-#: lxc/storage.go:804
+#: lxc/storage.go:835
 msgid "description"
 msgstr ""
 
-#: lxc/init.go:311
+#: lxc/init.go:314
 msgid "didn't get any affected image, container or snapshot from server"
 msgstr ""
 "サーバから変更されたイメージ、コンテナ、スナップショットを取得できませんで\n"
@@ -2965,7 +3041,7 @@ msgstr ""
 msgid "disabled"
 msgstr "無効"
 
-#: lxc/storage.go:803
+#: lxc/storage.go:834
 msgid "driver"
 msgstr ""
 
@@ -2983,13 +3059,14 @@ msgstr "エラー: %v"
 msgid "error: unknown command: %s"
 msgstr "エラー: 未知のコマンド: %s"
 
-#: lxc/storage.go:801
+#: lxc/storage.go:832
 msgid "info"
 msgstr ""
 
-#: lxc/storage.go:802
+#: lxc/storage.go:833
+#, fuzzy
 msgid "name"
-msgstr ""
+msgstr "ネットワーク名:"
 
 #: lxc/image.go:209 lxc/image.go:583 lxc/image.go:588
 msgid "no"
@@ -2999,7 +3076,7 @@ msgstr ""
 msgid "ok (y/n)?"
 msgstr "ok (y/n)?"
 
-#: lxc/main.go:366 lxc/main.go:370
+#: lxc/main.go:367 lxc/main.go:371
 #, c-format
 msgid "processing aliases failed %s\n"
 msgstr "エイリアスの処理が失敗しました %s\n"
@@ -3028,32 +3105,32 @@ msgstr "リモート %s は <%s> として存在します"
 msgid "remote %s is static and cannot be modified"
 msgstr "リモート %s は static ですので変更できません"
 
-#: lxc/storage.go:806
+#: lxc/storage.go:837
 msgid "space used"
 msgstr ""
 
-#: lxc/info.go:248
+#: lxc/info.go:251
 msgid "stateful"
 msgstr "ステートフル"
 
-#: lxc/info.go:250
+#: lxc/info.go:253
 msgid "stateless"
 msgstr "ステートレス"
 
-#: lxc/info.go:244
+#: lxc/info.go:247
 #, c-format
 msgid "taken at %s"
 msgstr "%s に取得しました"
 
-#: lxc/storage.go:805
+#: lxc/storage.go:836
 msgid "total space"
 msgstr ""
 
-#: lxc/storage.go:800
+#: lxc/storage.go:831
 msgid "used by"
 msgstr ""
 
-#: lxc/main.go:297
+#: lxc/main.go:298
 msgid "wrong number of subcommand arguments"
 msgstr "サブコマンドの引数の数が正しくありません"
 
diff --git a/po/lxd.pot b/po/lxd.pot
index a3a5a4ed6..614bae5ea 100644
--- a/po/lxd.pot
+++ b/po/lxd.pot
@@ -7,7 +7,7 @@
 msgid   ""
 msgstr  "Project-Id-Version: lxd\n"
         "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-        "POT-Creation-Date: 2018-01-21 22:50+0100\n"
+        "POT-Creation-Date: 2018-01-24 13:34+0000\n"
         "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
         "Last-Translator: FULL NAME <EMAIL at ADDRESS>\n"
         "Language-Team: LANGUAGE <LL at li.org>\n"
@@ -16,7 +16,7 @@ msgstr  "Project-Id-Version: lxd\n"
         "Content-Type: text/plain; charset=CHARSET\n"
         "Content-Transfer-Encoding: 8bit\n"
 
-#: lxc/storage.go:34
+#: lxc/storage.go:35
 msgid   "### This is a yaml representation of a storage pool.\n"
         "### Any line starting with a '# will be ignored.\n"
         "###\n"
@@ -32,7 +32,7 @@ msgid   "### This is a yaml representation of a storage pool.\n"
         "###   zfs.pool_name: default"
 msgstr  ""
 
-#: lxc/storage.go:51
+#: lxc/storage.go:52
 msgid   "### This is a yaml representation of a storage volume.\n"
         "### Any line starting with a '# will be ignored.\n"
         "###\n"
@@ -97,7 +97,7 @@ msgid   "### This is a yaml representation of the image properties.\n"
         "###  description: My custom image"
 msgstr  ""
 
-#: lxc/network.go:30
+#: lxc/network.go:32
 msgid   "### This is a yaml representation of the network.\n"
         "### Any line starting with a '# will be ignored.\n"
         "###\n"
@@ -146,7 +146,7 @@ msgstr  ""
 msgid   "%s is not a directory"
 msgstr  ""
 
-#: lxc/utils.go:375
+#: lxc/utils.go:376
 #, c-format
 msgid   "%v (interrupt two more times to force)"
 msgstr  ""
@@ -172,7 +172,7 @@ msgstr  ""
 msgid   "ARCH"
 msgstr  ""
 
-#: lxc/list.go:463
+#: lxc/list.go:467
 msgid   "ARCHITECTURE"
 msgstr  ""
 
@@ -197,7 +197,7 @@ msgstr  ""
 msgid   "Aliases:"
 msgstr  ""
 
-#: lxc/image.go:600 lxc/info.go:122
+#: lxc/image.go:600 lxc/info.go:125
 #, c-format
 msgid   "Architecture: %s"
 msgstr  ""
@@ -221,11 +221,11 @@ msgstr  ""
 msgid   "Both --all and container name given"
 msgstr  ""
 
-#: lxc/info.go:215
+#: lxc/info.go:218
 msgid   "Bytes received"
 msgstr  ""
 
-#: lxc/info.go:216
+#: lxc/info.go:219
 msgid   "Bytes sent"
 msgstr  ""
 
@@ -237,11 +237,11 @@ msgstr  ""
 msgid   "COMMON NAME"
 msgstr  ""
 
-#: lxc/info.go:179
+#: lxc/info.go:182
 msgid   "CPU usage (in seconds)"
 msgstr  ""
 
-#: lxc/info.go:183
+#: lxc/info.go:186
 msgid   "CPU usage:"
 msgstr  ""
 
@@ -249,7 +249,7 @@ msgstr  ""
 msgid   "CREATED"
 msgstr  ""
 
-#: lxc/list.go:464
+#: lxc/list.go:468
 msgid   "CREATED AT"
 msgstr  ""
 
@@ -262,7 +262,7 @@ msgstr  ""
 msgid   "Can't pull a directory without --recursive"
 msgstr  ""
 
-#: lxc/config.go:207 lxc/network.go:560
+#: lxc/config.go:207 lxc/network.go:587
 #, c-format
 msgid   "Can't read from stdin: %s"
 msgstr  ""
@@ -281,7 +281,7 @@ msgstr  ""
 msgid   "Can't unset key '%s', it's not currently set."
 msgstr  ""
 
-#: lxc/profile.go:546 lxc/storage.go:663
+#: lxc/profile.go:546 lxc/storage.go:675
 msgid   "Cannot provide container name to list"
 msgstr  ""
 
@@ -294,7 +294,7 @@ msgstr  ""
 msgid   "Client certificate stored at server: "
 msgstr  ""
 
-#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:130 lxc/list.go:131
+#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:134 lxc/list.go:135
 msgid   "Columns"
 msgstr  ""
 
@@ -302,11 +302,11 @@ msgstr  ""
 msgid   "Commands:"
 msgstr  ""
 
-#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:137 lxc/init.go:138
+#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:138 lxc/init.go:139
 msgid   "Config key/value to apply to the new container"
 msgstr  ""
 
-#: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190 lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:614 lxc/storage.go:1067
+#: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190 lxc/network.go:440 lxc/profile.go:275 lxc/storage.go:626 lxc/storage.go:1138
 #, c-format
 msgid   "Config parsing error: %s"
 msgstr  ""
@@ -323,7 +323,7 @@ msgstr  ""
 msgid   "Container name is mandatory"
 msgstr  ""
 
-#: lxc/copy.go:229 lxc/init.go:317
+#: lxc/copy.go:229 lxc/init.go:320
 #, c-format
 msgid   "Container name is: %s"
 msgstr  ""
@@ -358,25 +358,29 @@ msgstr  ""
 msgid   "Create any directories necessary"
 msgstr  ""
 
-#: lxc/image.go:605 lxc/info.go:124
+#: lxc/image.go:605 lxc/info.go:127
 #, c-format
 msgid   "Created: %s"
 msgstr  ""
 
-#: lxc/init.go:196
+#: lxc/init.go:199
 #, c-format
 msgid   "Creating %s"
 msgstr  ""
 
-#: lxc/init.go:194
+#: lxc/init.go:197
 msgid   "Creating the container"
 msgstr  ""
 
-#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:465 lxc/network.go:525 lxc/storage.go:692 lxc/storage.go:881
+#: lxc/cluster.go:184
+msgid   "DATABASE"
+msgstr  ""
+
+#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:469 lxc/network.go:547 lxc/storage.go:711 lxc/storage.go:916
 msgid   "DESCRIPTION"
 msgstr  ""
 
-#: lxc/storage.go:693
+#: lxc/storage.go:712
 msgid   "DRIVER"
 msgstr  ""
 
@@ -394,7 +398,7 @@ msgstr  ""
 msgid   "Device %s removed from %s"
 msgstr  ""
 
-#: lxc/utils.go:307 lxc/utils.go:331
+#: lxc/utils.go:308 lxc/utils.go:332
 #, c-format
 msgid   "Device already exists: %s"
 msgstr  ""
@@ -411,11 +415,11 @@ msgstr  ""
 msgid   "Disable stdin (reads from /dev/null)"
 msgstr  ""
 
-#: lxc/info.go:172
+#: lxc/info.go:175
 msgid   "Disk usage:"
 msgstr  ""
 
-#: lxc/list.go:622
+#: lxc/list.go:638
 msgid   "EPHEMERAL"
 msgstr  ""
 
@@ -439,7 +443,7 @@ msgstr  ""
 msgid   "Environment:"
 msgstr  ""
 
-#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:141 lxc/init.go:142
+#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:142 lxc/init.go:143
 msgid   "Ephemeral container"
 msgstr  ""
 
@@ -474,7 +478,7 @@ msgstr  ""
 msgid   "FINGERPRINT"
 msgstr  ""
 
-#: lxc/utils.go:413
+#: lxc/utils.go:414
 #, c-format
 msgid   "Failed to create alias %s"
 msgstr  ""
@@ -493,7 +497,7 @@ msgstr  ""
 msgid   "Failed to get the new container name"
 msgstr  ""
 
-#: lxc/utils.go:403
+#: lxc/utils.go:404
 #, c-format
 msgid   "Failed to remove alias %s"
 msgstr  ""
@@ -503,11 +507,11 @@ msgstr  ""
 msgid   "Failed to walk path for %s: %s"
 msgstr  ""
 
-#: lxc/list.go:133
+#: lxc/list.go:137
 msgid   "Fast mode (same as --columns=nsacPt)"
 msgstr  ""
 
-#: lxc/network.go:486 lxc/operation.go:121
+#: lxc/network.go:504 lxc/operation.go:121
 msgid   "Filtering isn't supported yet"
 msgstr  ""
 
@@ -520,6 +524,10 @@ msgstr  ""
 msgid   "Force pseudo-terminal allocation"
 msgstr  ""
 
+#: lxc/cluster.go:41
+msgid   "Force removing a node, even if degraded"
+msgstr  ""
+
 #: lxc/action.go:47 lxc/action.go:48
 msgid   "Force the container to shutdown"
 msgstr  ""
@@ -532,7 +540,7 @@ msgstr  ""
 msgid   "Force using the local unix socket"
 msgstr  ""
 
-#: lxc/image.go:180 lxc/list.go:132
+#: lxc/image.go:180 lxc/list.go:136
 msgid   "Format (csv|json|table|yaml)"
 msgstr  ""
 
@@ -544,11 +552,11 @@ msgstr  ""
 msgid   "ID"
 msgstr  ""
 
-#: lxc/list.go:461
+#: lxc/list.go:465
 msgid   "IPV4"
 msgstr  ""
 
-#: lxc/list.go:462
+#: lxc/list.go:466
 msgid   "IPV6"
 msgstr  ""
 
@@ -593,7 +601,7 @@ msgstr  ""
 msgid   "Input data"
 msgstr  ""
 
-#: lxc/init.go:147
+#: lxc/init.go:148
 msgid   "Instance type"
 msgstr  ""
 
@@ -630,7 +638,7 @@ msgstr  ""
 msgid   "Invalid target %s"
 msgstr  ""
 
-#: lxc/info.go:153
+#: lxc/info.go:156
 msgid   "Ips:"
 msgstr  ""
 
@@ -638,7 +646,7 @@ msgstr  ""
 msgid   "Keep the image up to date after initial copy"
 msgstr  ""
 
-#: lxc/list.go:466
+#: lxc/list.go:470
 msgid   "LAST USED AT"
 msgstr  ""
 
@@ -655,14 +663,18 @@ msgstr  ""
 msgid   "Last used: never"
 msgstr  ""
 
-#: lxc/info.go:268
+#: lxc/info.go:271
 msgid   "Log:"
 msgstr  ""
 
-#: lxc/network.go:524
+#: lxc/network.go:546
 msgid   "MANAGED"
 msgstr  ""
 
+#: lxc/cluster.go:186
+msgid   "MESSAGE"
+msgstr  ""
+
 #: lxc/image.go:176
 msgid   "Make image public"
 msgstr  ""
@@ -671,23 +683,23 @@ msgstr  ""
 msgid   "Make the image public"
 msgstr  ""
 
-#: lxc/info.go:190
+#: lxc/info.go:193
 msgid   "Memory (current)"
 msgstr  ""
 
-#: lxc/info.go:194
+#: lxc/info.go:197
 msgid   "Memory (peak)"
 msgstr  ""
 
-#: lxc/info.go:206
+#: lxc/info.go:209
 msgid   "Memory usage:"
 msgstr  ""
 
-#: lxc/utils.go:258
+#: lxc/utils.go:259
 msgid   "Missing summary."
 msgstr  ""
 
-#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:412 lxc/storage.go:532
+#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:414 lxc/storage.go:544
 msgid   "More than one device matches, specify the device name."
 msgstr  ""
 
@@ -707,39 +719,48 @@ msgstr  ""
 msgid   "Must supply container name for: "
 msgstr  ""
 
-#: lxc/list.go:467 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409 lxc/storage.go:691 lxc/storage.go:880
+#: lxc/cluster.go:182 lxc/list.go:471 lxc/network.go:544 lxc/profile.go:573 lxc/remote.go:409 lxc/storage.go:710 lxc/storage.go:915
 msgid   "NAME"
 msgstr  ""
 
-#: lxc/network.go:508 lxc/operation.go:139 lxc/remote.go:380 lxc/remote.go:385
+#: lxc/network.go:526 lxc/operation.go:139 lxc/remote.go:380 lxc/remote.go:385
 msgid   "NO"
 msgstr  ""
 
+#: lxc/list.go:492 lxc/storage.go:920
+msgid   "NODE"
+msgstr  ""
+
 #: lxc/info.go:117
 #, c-format
 msgid   "Name: %s"
 msgstr  ""
 
-#: lxc/network.go:258
+#: lxc/network.go:271
 #, c-format
 msgid   "Network %s created"
 msgstr  ""
 
-#: lxc/network.go:374
+#: lxc/network.go:388
 #, c-format
 msgid   "Network %s deleted"
 msgstr  ""
 
-#: lxc/network.go:451
+#: lxc/network.go:269
+#, c-format
+msgid   "Network %s pending on node %s"
+msgstr  ""
+
+#: lxc/network.go:465
 #, c-format
 msgid   "Network %s renamed to %s"
 msgstr  ""
 
-#: lxc/init.go:143 lxc/init.go:144
+#: lxc/init.go:144 lxc/init.go:145
 msgid   "Network name"
 msgstr  ""
 
-#: lxc/info.go:223
+#: lxc/info.go:226
 msgid   "Network usage:"
 msgstr  ""
 
@@ -751,11 +772,11 @@ msgstr  ""
 msgid   "No certificate provided to add"
 msgstr  ""
 
-#: lxc/network.go:293 lxc/network.go:346
+#: lxc/network.go:307 lxc/network.go:360
 msgid   "No device found for this network"
 msgstr  ""
 
-#: lxc/storage.go:421 lxc/storage.go:541
+#: lxc/storage.go:423 lxc/storage.go:553
 msgid   "No device found for this storage volume."
 msgstr  ""
 
@@ -763,7 +784,26 @@ msgstr  ""
 msgid   "No fingerprint specified."
 msgstr  ""
 
-#: lxc/storage.go:365 lxc/storage.go:458
+#: lxc/cluster.go:142
+#, c-format
+msgid   "Node %s removed"
+msgstr  ""
+
+#: lxc/cluster.go:118
+#, c-format
+msgid   "Node %s renamed to %s"
+msgstr  ""
+
+#: lxc/init.go:149 lxc/network.go:102 lxc/storage.go:158
+msgid   "Node name"
+msgstr  ""
+
+#: lxc/info.go:119
+#, c-format
+msgid   "Node: %s"
+msgstr  ""
+
+#: lxc/storage.go:367 lxc/storage.go:460
 msgid   "Only \"custom\" volumes can be attached to containers."
 msgstr  ""
 
@@ -775,7 +815,7 @@ msgstr  ""
 msgid   "Only https:// is supported for remote image import."
 msgstr  ""
 
-#: lxc/network.go:402 lxc/network.go:546
+#: lxc/network.go:416 lxc/network.go:573
 msgid   "Only managed networks can be modified."
 msgstr  ""
 
@@ -792,19 +832,19 @@ msgstr  ""
 msgid   "Override the terminal mode (auto, interactive or non-interactive)"
 msgstr  ""
 
-#: lxc/list.go:624
+#: lxc/list.go:640
 msgid   "PERSISTENT"
 msgstr  ""
 
-#: lxc/list.go:469
+#: lxc/list.go:473
 msgid   "PID"
 msgstr  ""
 
-#: lxc/list.go:468
+#: lxc/list.go:472
 msgid   "PROCESSES"
 msgstr  ""
 
-#: lxc/list.go:470
+#: lxc/list.go:474
 msgid   "PROFILES"
 msgstr  ""
 
@@ -816,11 +856,11 @@ msgstr  ""
 msgid   "PUBLIC"
 msgstr  ""
 
-#: lxc/info.go:217
+#: lxc/info.go:220
 msgid   "Packets received"
 msgstr  ""
 
-#: lxc/info.go:218
+#: lxc/info.go:221
 msgid   "Packets sent"
 msgstr  ""
 
@@ -832,7 +872,7 @@ msgstr  ""
 msgid   "Path to an alternate server directory"
 msgstr  ""
 
-#: lxc/main.go:238
+#: lxc/main.go:239
 msgid   "Pause containers."
 msgstr  ""
 
@@ -840,12 +880,12 @@ msgstr  ""
 msgid   "Permission denied, are you in the lxd group?"
 msgstr  ""
 
-#: lxc/info.go:135
+#: lxc/info.go:138
 #, c-format
 msgid   "Pid: %d"
 msgstr  ""
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:615 lxc/storage.go:1068
+#: lxc/network.go:441 lxc/profile.go:276 lxc/storage.go:627 lxc/storage.go:1139
 msgid   "Press enter to open the editor again"
 msgstr  ""
 
@@ -869,7 +909,7 @@ msgstr  ""
 msgid   "Print verbose information"
 msgstr  ""
 
-#: lxc/info.go:159
+#: lxc/info.go:162
 #, c-format
 msgid   "Processes: %d"
 msgstr  ""
@@ -899,7 +939,7 @@ msgstr  ""
 msgid   "Profile %s renamed to %s"
 msgstr  ""
 
-#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:139 lxc/init.go:140
+#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:140 lxc/init.go:141
 msgid   "Profile to apply to the new container"
 msgstr  ""
 
@@ -908,7 +948,7 @@ msgstr  ""
 msgid   "Profiles %s applied to %s"
 msgstr  ""
 
-#: lxc/info.go:133
+#: lxc/info.go:136
 #, c-format
 msgid   "Profiles: %s"
 msgstr  ""
@@ -939,11 +979,11 @@ msgstr  ""
 msgid   "Remote admin password"
 msgstr  ""
 
-#: lxc/utils.go:366
+#: lxc/utils.go:367
 msgid   "Remote operation canceled by user"
 msgstr  ""
 
-#: lxc/info.go:119
+#: lxc/info.go:122
 #, c-format
 msgid   "Remote: %s"
 msgstr  ""
@@ -953,7 +993,7 @@ msgstr  ""
 msgid   "Remove %s (yes/no): "
 msgstr  ""
 
-#: lxc/storage.go:1099
+#: lxc/storage.go:1176
 #, c-format
 msgid   "Renamed storage volume from \"%s\" to \"%s\""
 msgstr  ""
@@ -962,11 +1002,11 @@ msgstr  ""
 msgid   "Require user confirmation"
 msgstr  ""
 
-#: lxc/info.go:156
+#: lxc/info.go:159
 msgid   "Resources:"
 msgstr  ""
 
-#: lxc/main.go:246
+#: lxc/main.go:247
 msgid   "Restart containers."
 msgstr  ""
 
@@ -974,7 +1014,7 @@ msgstr  ""
 msgid   "Retrieve the container's console log"
 msgstr  ""
 
-#: lxc/init.go:289
+#: lxc/init.go:292
 #, c-format
 msgid   "Retrieving image: %s"
 msgstr  ""
@@ -987,15 +1027,15 @@ msgstr  ""
 msgid   "SIZE"
 msgstr  ""
 
-#: lxc/list.go:471
+#: lxc/list.go:475
 msgid   "SNAPSHOTS"
 msgstr  ""
 
-#: lxc/storage.go:694
+#: lxc/storage.go:717
 msgid   "SOURCE"
 msgstr  ""
 
-#: lxc/list.go:472
+#: lxc/cluster.go:185 lxc/list.go:476 lxc/network.go:551 lxc/storage.go:715
 msgid   "STATE"
 msgstr  ""
 
@@ -1007,7 +1047,7 @@ msgstr  ""
 msgid   "STATUS"
 msgstr  ""
 
-#: lxc/list.go:474
+#: lxc/list.go:478
 msgid   "STORAGE POOL"
 msgstr  ""
 
@@ -1059,11 +1099,11 @@ msgstr  ""
 msgid   "Show the resources available to the server"
 msgstr  ""
 
-#: lxc/storage.go:155
+#: lxc/storage.go:156
 msgid   "Show the resources available to the storage pool"
 msgstr  ""
 
-#: lxc/storage.go:156
+#: lxc/storage.go:157
 msgid   "Show the used and free space in bytes"
 msgstr  ""
 
@@ -1072,7 +1112,7 @@ msgstr  ""
 msgid   "Size: %.2fMB"
 msgstr  ""
 
-#: lxc/info.go:237
+#: lxc/info.go:240
 msgid   "Snapshots:"
 msgstr  ""
 
@@ -1085,7 +1125,7 @@ msgstr  ""
 msgid   "Source:"
 msgstr  ""
 
-#: lxc/main.go:256
+#: lxc/main.go:257
 msgid   "Start containers."
 msgstr  ""
 
@@ -1094,12 +1134,12 @@ msgstr  ""
 msgid   "Starting %s"
 msgstr  ""
 
-#: lxc/info.go:127
+#: lxc/info.go:130
 #, c-format
 msgid   "Status: %s"
 msgstr  ""
 
-#: lxc/main.go:262
+#: lxc/main.go:263
 msgid   "Stop containers."
 msgstr  ""
 
@@ -1116,26 +1156,31 @@ msgstr  ""
 msgid   "Stopping the container failed: %s"
 msgstr  ""
 
-#: lxc/storage.go:506
+#: lxc/storage.go:517
 #, c-format
 msgid   "Storage pool %s created"
 msgstr  ""
 
-#: lxc/storage.go:565
+#: lxc/storage.go:577
 #, c-format
 msgid   "Storage pool %s deleted"
 msgstr  ""
 
-#: lxc/init.go:145 lxc/init.go:146
+#: lxc/storage.go:515
+#, c-format
+msgid   "Storage pool %s pending on node %s"
+msgstr  ""
+
+#: lxc/init.go:146 lxc/init.go:147
 msgid   "Storage pool name"
 msgstr  ""
 
-#: lxc/storage.go:914
+#: lxc/storage.go:959
 #, c-format
 msgid   "Storage volume %s created"
 msgstr  ""
 
-#: lxc/storage.go:929
+#: lxc/storage.go:979
 #, c-format
 msgid   "Storage volume %s deleted"
 msgstr  ""
@@ -1144,15 +1189,15 @@ msgstr  ""
 msgid   "Store the container state (only for stop)"
 msgstr  ""
 
-#: lxc/info.go:198
+#: lxc/info.go:201
 msgid   "Swap (current)"
 msgstr  ""
 
-#: lxc/info.go:202
+#: lxc/info.go:205
 msgid   "Swap (peak)"
 msgstr  ""
 
-#: lxc/list.go:473 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:879
+#: lxc/list.go:477 lxc/network.go:545 lxc/operation.go:153 lxc/storage.go:914
 msgid   "TYPE"
 msgstr  ""
 
@@ -1164,7 +1209,7 @@ msgstr  ""
 msgid   "The container is currently running. Use --force to have it stopped and restarted."
 msgstr  ""
 
-#: lxc/init.go:362
+#: lxc/init.go:365
 msgid   "The container you are starting doesn't have any network attached to it."
 msgstr  ""
 
@@ -1176,7 +1221,7 @@ msgstr  ""
 msgid   "The device doesn't exist"
 msgstr  ""
 
-#: lxc/init.go:346
+#: lxc/init.go:349
 #, c-format
 msgid   "The local image '%s' couldn't be found, trying '%s:' instead."
 msgstr  ""
@@ -1185,11 +1230,11 @@ msgstr  ""
 msgid   "The opposite of \"lxc pause\" is \"lxc start\"."
 msgstr  ""
 
-#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:426 lxc/storage.go:546
+#: lxc/network.go:312 lxc/network.go:365 lxc/storage.go:428 lxc/storage.go:558
 msgid   "The specified device doesn't exist"
 msgstr  ""
 
-#: lxc/network.go:302 lxc/network.go:355
+#: lxc/network.go:316 lxc/network.go:369
 msgid   "The specified device doesn't match the network"
 msgstr  ""
 
@@ -1212,11 +1257,11 @@ msgstr  ""
 msgid   "Timestamps:"
 msgstr  ""
 
-#: lxc/init.go:364
+#: lxc/init.go:367
 msgid   "To attach a network to a container, use: lxc network attach"
 msgstr  ""
 
-#: lxc/init.go:363
+#: lxc/init.go:366
 msgid   "To create a new network, use: lxc network create"
 msgstr  ""
 
@@ -1247,11 +1292,11 @@ msgstr  ""
 msgid   "Try `lxc info --show-log %s` for more info"
 msgstr  ""
 
-#: lxc/info.go:129
+#: lxc/info.go:132
 msgid   "Type: ephemeral"
 msgstr  ""
 
-#: lxc/info.go:131
+#: lxc/info.go:134
 msgid   "Type: persistent"
 msgstr  ""
 
@@ -1259,11 +1304,11 @@ msgstr  ""
 msgid   "UPLOAD DATE"
 msgstr  ""
 
-#: lxc/remote.go:410
+#: lxc/cluster.go:183 lxc/remote.go:410
 msgid   "URL"
 msgstr  ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:695 lxc/storage.go:882
+#: lxc/network.go:548 lxc/profile.go:574 lxc/storage.go:719 lxc/storage.go:917
 msgid   "USED BY"
 msgstr  ""
 
@@ -1292,6 +1337,24 @@ msgstr  ""
 msgid   "Usage: lxc <command> [options]"
 msgstr  ""
 
+#: lxc/cluster.go:22
+msgid   "Usage: lxc cluster <subcommand> [options]\n"
+        "\n"
+        "Manage cluster nodes.\n"
+        "\n"
+        "lxc cluster list [<remote>:]\n"
+        "    List all nodes in the cluster.\n"
+        "\n"
+        "lxc cluster show [<remote>:]<node>\n"
+        "    Show details of a node.\n"
+        "\n"
+        "lxc cluster rename [<remote>:]<node> <new-name>\n"
+        "    Rename a cluster node.\n"
+        "\n"
+        "lxc cluster delete [<remote>:]<node> [--force]\n"
+        "    Delete a node from the cluster."
+msgstr  ""
+
 #: lxc/config.go:85
 msgid   "Usage: lxc config <subcommand> [options]\n"
         "\n"
@@ -1574,8 +1637,8 @@ msgid   "Usage: lxc info [<remote>:][<container>] [--show-log] [--resources]\n"
         "    For LXD server information."
 msgstr  ""
 
-#: lxc/init.go:77
-msgid   "Usage: lxc init [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n <network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+#: lxc/init.go:78
+msgid   "Usage: lxc init [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n <network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target <node>]\n"
         "\n"
         "Create containers from images.\n"
         "\n"
@@ -1587,7 +1650,7 @@ msgid   "Usage: lxc init [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e]
 msgstr  ""
 
 #: lxc/launch.go:20
-msgid   "Usage: lxc launch [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n <network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+msgid   "Usage: lxc launch [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n <network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target <node>]\n"
         "\n"
         "Create and start containers from images.\n"
         "\n"
@@ -1662,6 +1725,8 @@ msgid   "Usage: lxc list [<remote>:] [filters] [--format csv|json|table|yaml] [-
         "\n"
         "	t - Type (persistent or ephemeral)\n"
         "\n"
+        "	H - Node hosting the container\n"
+        "\n"
         "Custom columns are defined with \"key[:name][:maxWidth]\":\n"
         "\n"
         "	KEY: The (extended) config key to display\n"
@@ -1718,7 +1783,7 @@ msgid   "Usage: lxc move [<remote>:]<container>[/<snapshot>] [<remote>:][<contai
         "    Rename a snapshot."
 msgstr  ""
 
-#: lxc/network.go:50
+#: lxc/network.go:52
 msgid   "Usage: lxc network <subcommand> [options]\n"
         "\n"
         "Manage and attach containers to networks.\n"
@@ -1726,13 +1791,13 @@ msgid   "Usage: lxc network <subcommand> [options]\n"
         "lxc network list [<remote>:]\n"
         "    List available networks.\n"
         "\n"
-        "lxc network show [<remote>:]<network>\n"
+        "lxc network show [<remote>:]<network> [--target <node>]\n"
         "    Show details of a network.\n"
         "\n"
-        "lxc network create [<remote>:]<network> [key=value...]\n"
+        "lxc network create [<remote>:]<network> [key=value...] [--target <node>]\n"
         "    Create a network.\n"
         "\n"
-        "lxc network get [<remote>:]<network> <key>\n"
+        "lxc network get [<remote>:]<network> <key> [--target <node>]\n"
         "    Get network configuration.\n"
         "\n"
         "lxc network set [<remote>:]<network> <key> <value>\n"
@@ -1945,7 +2010,7 @@ msgid   "Usage: lxc snapshot [<remote>:]<container> <snapshot name> [--stateful]
         "    Create a snapshot of \"u1\" called \"snap0\"."
 msgstr  ""
 
-#: lxc/storage.go:65
+#: lxc/storage.go:66
 msgid   "Usage: lxc storage <subcommand> [options]\n"
         "\n"
         "Manage storage pools and volumes.\n"
@@ -1954,13 +2019,13 @@ msgid   "Usage: lxc storage <subcommand> [options]\n"
         "lxc storage list [<remote>:]\n"
         "    List available storage pools.\n"
         "\n"
-        "lxc storage show [<remote>:]<pool> [--resources]\n"
+        "lxc storage show [<remote>:]<pool> [--resources] [--target <node>]\n"
         "    Show details of a storage pool.\n"
         "\n"
         "lxc storage info [<remote>:]<pool> [--bytes]\n"
         "    Show information of a storage pool in yaml format.\n"
         "\n"
-        "lxc storage create [<remote>:]<pool> <driver> [key=value]...\n"
+        "lxc storage create [<remote>:]<pool> <driver> [key=value]... [--target <node>]\n"
         "    Create a storage pool.\n"
         "\n"
         "lxc storage get [<remote>:]<pool> <key>\n"
@@ -1982,28 +2047,28 @@ msgid   "Usage: lxc storage <subcommand> [options]\n"
         "lxc storage volume list [<remote>:]<pool>\n"
         "    List available storage volumes on a storage pool.\n"
         "\n"
-        "lxc storage volume show [<remote>:]<pool> <volume>\n"
-        "    Show details of a storage volume on a storage pool.\n"
+        "lxc storage volume show [<remote>:]<pool> <volume> [--target <node>]\n"
+        "   Show details of a storage volume on a storage pool.\n"
         "\n"
-        "lxc storage volume create [<remote>:]<pool> <volume> [key=value]...\n"
+        "lxc storage volume create [<remote>:]<pool> <volume> [key=value]... [--target <node>]\n"
         "    Create a storage volume on a storage pool.\n"
         "\n"
-        "lxc storage volume rename [<remote>:]<pool> <old name> <new name>\n"
+        "lxc storage volume rename [<remote>:]<pool> <old name> <new name> [--target <node>]\n"
         "    Rename a storage volume on a storage pool.\n"
         "\n"
-        "lxc storage volume get [<remote>:]<pool> <volume> <key>\n"
+        "lxc storage volume get [<remote>:]<pool> <volume> <key> [--target <node>]\n"
         "    Get storage volume configuration on a storage pool.\n"
         "\n"
-        "lxc storage volume set [<remote>:]<pool> <volume> <key> <value>\n"
+        "lxc storage volume set [<remote>:]<pool> <volume> <key> <value> [--target <node>]\n"
         "    Set storage volume configuration on a storage pool.\n"
         "\n"
-        "lxc storage volume unset [<remote>:]<pool> <volume> <key>\n"
+        "lxc storage volume unset [<remote>:]<pool> <volume> <key> [--target <node>]\n"
         "    Unset storage volume configuration on a storage pool.\n"
         "\n"
-        "lxc storage volume delete [<remote>:]<pool> <volume>\n"
+        "lxc storage volume delete [<remote>:]<pool> <volume> [--target <node>]\n"
         "    Delete a storage volume on a storage pool.\n"
         "\n"
-        "lxc storage volume edit [<remote>:]<pool> <volume>\n"
+        "lxc storage volume edit [<remote>:]<pool> <volume> [--target <node>]\n"
         "    Edit storage volume, either by launching external editor or reading STDIN.\n"
         "\n"
         "lxc storage volume attach [<remote>:]<pool> <volume> <container> [device name] <path>\n"
@@ -2044,7 +2109,7 @@ msgstr  ""
 msgid   "User aborted delete operation."
 msgstr  ""
 
-#: lxc/utils.go:371
+#: lxc/utils.go:372
 msgid   "User signaled us three times, exiting. The remote operation will keep running."
 msgstr  ""
 
@@ -2060,7 +2125,7 @@ msgstr  ""
 msgid   "Whether or not to snapshot the container's running state"
 msgstr  ""
 
-#: lxc/network.go:510 lxc/operation.go:141 lxc/remote.go:382 lxc/remote.go:387
+#: lxc/network.go:528 lxc/operation.go:141 lxc/remote.go:382 lxc/remote.go:387
 msgid   "YES"
 msgstr  ""
 
@@ -2092,11 +2157,11 @@ msgstr  ""
 msgid   "default"
 msgstr  ""
 
-#: lxc/storage.go:804
+#: lxc/storage.go:835
 msgid   "description"
 msgstr  ""
 
-#: lxc/init.go:311
+#: lxc/init.go:314
 msgid   "didn't get any affected image, container or snapshot from server"
 msgstr  ""
 
@@ -2104,7 +2169,7 @@ msgstr  ""
 msgid   "disabled"
 msgstr  ""
 
-#: lxc/storage.go:803
+#: lxc/storage.go:834
 msgid   "driver"
 msgstr  ""
 
@@ -2122,11 +2187,11 @@ msgstr  ""
 msgid   "error: unknown command: %s"
 msgstr  ""
 
-#: lxc/storage.go:801
+#: lxc/storage.go:832
 msgid   "info"
 msgstr  ""
 
-#: lxc/storage.go:802
+#: lxc/storage.go:833
 msgid   "name"
 msgstr  ""
 
@@ -2138,7 +2203,7 @@ msgstr  ""
 msgid   "ok (y/n)?"
 msgstr  ""
 
-#: lxc/main.go:366 lxc/main.go:370
+#: lxc/main.go:367 lxc/main.go:371
 #, c-format
 msgid   "processing aliases failed %s\n"
 msgstr  ""
@@ -2167,32 +2232,32 @@ msgstr  ""
 msgid   "remote %s is static and cannot be modified"
 msgstr  ""
 
-#: lxc/storage.go:806
+#: lxc/storage.go:837
 msgid   "space used"
 msgstr  ""
 
-#: lxc/info.go:248
+#: lxc/info.go:251
 msgid   "stateful"
 msgstr  ""
 
-#: lxc/info.go:250
+#: lxc/info.go:253
 msgid   "stateless"
 msgstr  ""
 
-#: lxc/info.go:244
+#: lxc/info.go:247
 #, c-format
 msgid   "taken at %s"
 msgstr  ""
 
-#: lxc/storage.go:805
+#: lxc/storage.go:836
 msgid   "total space"
 msgstr  ""
 
-#: lxc/storage.go:800
+#: lxc/storage.go:831
 msgid   "used by"
 msgstr  ""
 
-#: lxc/main.go:297
+#: lxc/main.go:298
 msgid   "wrong number of subcommand arguments"
 msgstr  ""
 
diff --git a/po/nb_NO.po b/po/nb_NO.po
index 0f41c8970..9c5cf1d20 100644
--- a/po/nb_NO.po
+++ b/po/nb_NO.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2018-01-21 21:48+0000\n"
+"POT-Creation-Date: 2018-01-24 13:34+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -16,7 +16,7 @@ msgstr ""
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
 
-#: lxc/storage.go:34
+#: lxc/storage.go:35
 msgid ""
 "### This is a yaml representation of a storage pool.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -33,7 +33,7 @@ msgid ""
 "###   zfs.pool_name: default"
 msgstr ""
 
-#: lxc/storage.go:51
+#: lxc/storage.go:52
 msgid ""
 "### This is a yaml representation of a storage volume.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -102,7 +102,7 @@ msgid ""
 "###  description: My custom image"
 msgstr ""
 
-#: lxc/network.go:30
+#: lxc/network.go:32
 msgid ""
 "### This is a yaml representation of the network.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -153,7 +153,7 @@ msgstr ""
 msgid "%s is not a directory"
 msgstr ""
 
-#: lxc/utils.go:375
+#: lxc/utils.go:376
 #, c-format
 msgid "%v (interrupt two more times to force)"
 msgstr ""
@@ -179,7 +179,7 @@ msgstr ""
 msgid "ARCH"
 msgstr ""
 
-#: lxc/list.go:463
+#: lxc/list.go:467
 msgid "ARCHITECTURE"
 msgstr ""
 
@@ -204,7 +204,7 @@ msgstr ""
 msgid "Aliases:"
 msgstr ""
 
-#: lxc/image.go:600 lxc/info.go:122
+#: lxc/image.go:600 lxc/info.go:125
 #, c-format
 msgid "Architecture: %s"
 msgstr ""
@@ -228,11 +228,11 @@ msgstr ""
 msgid "Both --all and container name given"
 msgstr ""
 
-#: lxc/info.go:215
+#: lxc/info.go:218
 msgid "Bytes received"
 msgstr ""
 
-#: lxc/info.go:216
+#: lxc/info.go:219
 msgid "Bytes sent"
 msgstr ""
 
@@ -244,11 +244,11 @@ msgstr ""
 msgid "COMMON NAME"
 msgstr ""
 
-#: lxc/info.go:179
+#: lxc/info.go:182
 msgid "CPU usage (in seconds)"
 msgstr ""
 
-#: lxc/info.go:183
+#: lxc/info.go:186
 msgid "CPU usage:"
 msgstr ""
 
@@ -256,7 +256,7 @@ msgstr ""
 msgid "CREATED"
 msgstr ""
 
-#: lxc/list.go:464
+#: lxc/list.go:468
 msgid "CREATED AT"
 msgstr ""
 
@@ -269,7 +269,7 @@ msgstr ""
 msgid "Can't pull a directory without --recursive"
 msgstr ""
 
-#: lxc/config.go:207 lxc/network.go:560
+#: lxc/config.go:207 lxc/network.go:587
 #, c-format
 msgid "Can't read from stdin: %s"
 msgstr ""
@@ -288,7 +288,7 @@ msgstr ""
 msgid "Can't unset key '%s', it's not currently set."
 msgstr ""
 
-#: lxc/profile.go:546 lxc/storage.go:663
+#: lxc/profile.go:546 lxc/storage.go:675
 msgid "Cannot provide container name to list"
 msgstr ""
 
@@ -301,7 +301,7 @@ msgstr ""
 msgid "Client certificate stored at server: "
 msgstr ""
 
-#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:130 lxc/list.go:131
+#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:134 lxc/list.go:135
 msgid "Columns"
 msgstr ""
 
@@ -309,12 +309,12 @@ msgstr ""
 msgid "Commands:"
 msgstr ""
 
-#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:137 lxc/init.go:138
+#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:138 lxc/init.go:139
 msgid "Config key/value to apply to the new container"
 msgstr ""
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:614 lxc/storage.go:1067
+#: lxc/network.go:440 lxc/profile.go:275 lxc/storage.go:626 lxc/storage.go:1138
 #, c-format
 msgid "Config parsing error: %s"
 msgstr ""
@@ -331,7 +331,7 @@ msgstr ""
 msgid "Container name is mandatory"
 msgstr ""
 
-#: lxc/copy.go:229 lxc/init.go:317
+#: lxc/copy.go:229 lxc/init.go:320
 #, c-format
 msgid "Container name is: %s"
 msgstr ""
@@ -366,26 +366,30 @@ msgstr ""
 msgid "Create any directories necessary"
 msgstr ""
 
-#: lxc/image.go:605 lxc/info.go:124
+#: lxc/image.go:605 lxc/info.go:127
 #, c-format
 msgid "Created: %s"
 msgstr ""
 
-#: lxc/init.go:196
+#: lxc/init.go:199
 #, c-format
 msgid "Creating %s"
 msgstr ""
 
-#: lxc/init.go:194
+#: lxc/init.go:197
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:465 lxc/network.go:525
-#: lxc/storage.go:692 lxc/storage.go:881
+#: lxc/cluster.go:184
+msgid "DATABASE"
+msgstr ""
+
+#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:469 lxc/network.go:547
+#: lxc/storage.go:711 lxc/storage.go:916
 msgid "DESCRIPTION"
 msgstr ""
 
-#: lxc/storage.go:693
+#: lxc/storage.go:712
 msgid "DRIVER"
 msgstr ""
 
@@ -403,7 +407,7 @@ msgstr ""
 msgid "Device %s removed from %s"
 msgstr ""
 
-#: lxc/utils.go:307 lxc/utils.go:331
+#: lxc/utils.go:308 lxc/utils.go:332
 #, c-format
 msgid "Device already exists: %s"
 msgstr ""
@@ -420,11 +424,11 @@ msgstr ""
 msgid "Disable stdin (reads from /dev/null)"
 msgstr ""
 
-#: lxc/info.go:172
+#: lxc/info.go:175
 msgid "Disk usage:"
 msgstr ""
 
-#: lxc/list.go:622
+#: lxc/list.go:638
 msgid "EPHEMERAL"
 msgstr ""
 
@@ -448,7 +452,7 @@ msgstr ""
 msgid "Environment:"
 msgstr ""
 
-#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:141 lxc/init.go:142
+#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:142 lxc/init.go:143
 msgid "Ephemeral container"
 msgstr ""
 
@@ -483,7 +487,7 @@ msgstr ""
 msgid "FINGERPRINT"
 msgstr ""
 
-#: lxc/utils.go:413
+#: lxc/utils.go:414
 #, c-format
 msgid "Failed to create alias %s"
 msgstr ""
@@ -502,7 +506,7 @@ msgstr ""
 msgid "Failed to get the new container name"
 msgstr ""
 
-#: lxc/utils.go:403
+#: lxc/utils.go:404
 #, c-format
 msgid "Failed to remove alias %s"
 msgstr ""
@@ -512,11 +516,11 @@ msgstr ""
 msgid "Failed to walk path for %s: %s"
 msgstr ""
 
-#: lxc/list.go:133
+#: lxc/list.go:137
 msgid "Fast mode (same as --columns=nsacPt)"
 msgstr ""
 
-#: lxc/network.go:486 lxc/operation.go:121
+#: lxc/network.go:504 lxc/operation.go:121
 msgid "Filtering isn't supported yet"
 msgstr ""
 
@@ -529,6 +533,10 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
+#: lxc/cluster.go:41
+msgid "Force removing a node, even if degraded"
+msgstr ""
+
 #: lxc/action.go:47 lxc/action.go:48
 msgid "Force the container to shutdown"
 msgstr ""
@@ -541,7 +549,7 @@ msgstr ""
 msgid "Force using the local unix socket"
 msgstr ""
 
-#: lxc/image.go:180 lxc/list.go:132
+#: lxc/image.go:180 lxc/list.go:136
 msgid "Format (csv|json|table|yaml)"
 msgstr ""
 
@@ -553,11 +561,11 @@ msgstr ""
 msgid "ID"
 msgstr ""
 
-#: lxc/list.go:461
+#: lxc/list.go:465
 msgid "IPV4"
 msgstr ""
 
-#: lxc/list.go:462
+#: lxc/list.go:466
 msgid "IPV6"
 msgstr ""
 
@@ -604,7 +612,7 @@ msgstr ""
 msgid "Input data"
 msgstr ""
 
-#: lxc/init.go:147
+#: lxc/init.go:148
 msgid "Instance type"
 msgstr ""
 
@@ -641,7 +649,7 @@ msgstr ""
 msgid "Invalid target %s"
 msgstr ""
 
-#: lxc/info.go:153
+#: lxc/info.go:156
 msgid "Ips:"
 msgstr ""
 
@@ -649,7 +657,7 @@ msgstr ""
 msgid "Keep the image up to date after initial copy"
 msgstr ""
 
-#: lxc/list.go:466
+#: lxc/list.go:470
 msgid "LAST USED AT"
 msgstr ""
 
@@ -666,14 +674,18 @@ msgstr ""
 msgid "Last used: never"
 msgstr ""
 
-#: lxc/info.go:268
+#: lxc/info.go:271
 msgid "Log:"
 msgstr ""
 
-#: lxc/network.go:524
+#: lxc/network.go:546
 msgid "MANAGED"
 msgstr ""
 
+#: lxc/cluster.go:186
+msgid "MESSAGE"
+msgstr ""
+
 #: lxc/image.go:176
 msgid "Make image public"
 msgstr ""
@@ -682,23 +694,23 @@ msgstr ""
 msgid "Make the image public"
 msgstr ""
 
-#: lxc/info.go:190
+#: lxc/info.go:193
 msgid "Memory (current)"
 msgstr ""
 
-#: lxc/info.go:194
+#: lxc/info.go:197
 msgid "Memory (peak)"
 msgstr ""
 
-#: lxc/info.go:206
+#: lxc/info.go:209
 msgid "Memory usage:"
 msgstr ""
 
-#: lxc/utils.go:258
+#: lxc/utils.go:259
 msgid "Missing summary."
 msgstr ""
 
-#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:412 lxc/storage.go:532
+#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:414 lxc/storage.go:544
 msgid "More than one device matches, specify the device name."
 msgstr ""
 
@@ -718,40 +730,49 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/list.go:467 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409
-#: lxc/storage.go:691 lxc/storage.go:880
+#: lxc/cluster.go:182 lxc/list.go:471 lxc/network.go:544 lxc/profile.go:573
+#: lxc/remote.go:409 lxc/storage.go:710 lxc/storage.go:915
 msgid "NAME"
 msgstr ""
 
-#: lxc/network.go:508 lxc/operation.go:139 lxc/remote.go:380 lxc/remote.go:385
+#: lxc/network.go:526 lxc/operation.go:139 lxc/remote.go:380 lxc/remote.go:385
 msgid "NO"
 msgstr ""
 
+#: lxc/list.go:492 lxc/storage.go:920
+msgid "NODE"
+msgstr ""
+
 #: lxc/info.go:117
 #, c-format
 msgid "Name: %s"
 msgstr ""
 
-#: lxc/network.go:258
+#: lxc/network.go:271
 #, c-format
 msgid "Network %s created"
 msgstr ""
 
-#: lxc/network.go:374
+#: lxc/network.go:388
 #, c-format
 msgid "Network %s deleted"
 msgstr ""
 
-#: lxc/network.go:451
+#: lxc/network.go:269
+#, c-format
+msgid "Network %s pending on node %s"
+msgstr ""
+
+#: lxc/network.go:465
 #, c-format
 msgid "Network %s renamed to %s"
 msgstr ""
 
-#: lxc/init.go:143 lxc/init.go:144
+#: lxc/init.go:144 lxc/init.go:145
 msgid "Network name"
 msgstr ""
 
-#: lxc/info.go:223
+#: lxc/info.go:226
 msgid "Network usage:"
 msgstr ""
 
@@ -763,11 +784,11 @@ msgstr ""
 msgid "No certificate provided to add"
 msgstr ""
 
-#: lxc/network.go:293 lxc/network.go:346
+#: lxc/network.go:307 lxc/network.go:360
 msgid "No device found for this network"
 msgstr ""
 
-#: lxc/storage.go:421 lxc/storage.go:541
+#: lxc/storage.go:423 lxc/storage.go:553
 msgid "No device found for this storage volume."
 msgstr ""
 
@@ -775,7 +796,26 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/storage.go:365 lxc/storage.go:458
+#: lxc/cluster.go:142
+#, c-format
+msgid "Node %s removed"
+msgstr ""
+
+#: lxc/cluster.go:118
+#, c-format
+msgid "Node %s renamed to %s"
+msgstr ""
+
+#: lxc/init.go:149 lxc/network.go:102 lxc/storage.go:158
+msgid "Node name"
+msgstr ""
+
+#: lxc/info.go:119
+#, c-format
+msgid "Node: %s"
+msgstr ""
+
+#: lxc/storage.go:367 lxc/storage.go:460
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
 
@@ -787,7 +827,7 @@ msgstr ""
 msgid "Only https:// is supported for remote image import."
 msgstr ""
 
-#: lxc/network.go:402 lxc/network.go:546
+#: lxc/network.go:416 lxc/network.go:573
 msgid "Only managed networks can be modified."
 msgstr ""
 
@@ -804,19 +844,19 @@ msgstr ""
 msgid "Override the terminal mode (auto, interactive or non-interactive)"
 msgstr ""
 
-#: lxc/list.go:624
+#: lxc/list.go:640
 msgid "PERSISTENT"
 msgstr ""
 
-#: lxc/list.go:469
+#: lxc/list.go:473
 msgid "PID"
 msgstr ""
 
-#: lxc/list.go:468
+#: lxc/list.go:472
 msgid "PROCESSES"
 msgstr ""
 
-#: lxc/list.go:470
+#: lxc/list.go:474
 msgid "PROFILES"
 msgstr ""
 
@@ -828,11 +868,11 @@ msgstr ""
 msgid "PUBLIC"
 msgstr ""
 
-#: lxc/info.go:217
+#: lxc/info.go:220
 msgid "Packets received"
 msgstr ""
 
-#: lxc/info.go:218
+#: lxc/info.go:221
 msgid "Packets sent"
 msgstr ""
 
@@ -844,7 +884,7 @@ msgstr ""
 msgid "Path to an alternate server directory"
 msgstr ""
 
-#: lxc/main.go:238
+#: lxc/main.go:239
 msgid "Pause containers."
 msgstr ""
 
@@ -852,12 +892,12 @@ msgstr ""
 msgid "Permission denied, are you in the lxd group?"
 msgstr ""
 
-#: lxc/info.go:135
+#: lxc/info.go:138
 #, c-format
 msgid "Pid: %d"
 msgstr ""
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:615 lxc/storage.go:1068
+#: lxc/network.go:441 lxc/profile.go:276 lxc/storage.go:627 lxc/storage.go:1139
 msgid "Press enter to open the editor again"
 msgstr ""
 
@@ -882,7 +922,7 @@ msgstr ""
 msgid "Print verbose information"
 msgstr ""
 
-#: lxc/info.go:159
+#: lxc/info.go:162
 #, c-format
 msgid "Processes: %d"
 msgstr ""
@@ -912,7 +952,7 @@ msgstr ""
 msgid "Profile %s renamed to %s"
 msgstr ""
 
-#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:139 lxc/init.go:140
+#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:140 lxc/init.go:141
 msgid "Profile to apply to the new container"
 msgstr ""
 
@@ -921,7 +961,7 @@ msgstr ""
 msgid "Profiles %s applied to %s"
 msgstr ""
 
-#: lxc/info.go:133
+#: lxc/info.go:136
 #, c-format
 msgid "Profiles: %s"
 msgstr ""
@@ -952,11 +992,11 @@ msgstr ""
 msgid "Remote admin password"
 msgstr ""
 
-#: lxc/utils.go:366
+#: lxc/utils.go:367
 msgid "Remote operation canceled by user"
 msgstr ""
 
-#: lxc/info.go:119
+#: lxc/info.go:122
 #, c-format
 msgid "Remote: %s"
 msgstr ""
@@ -966,7 +1006,7 @@ msgstr ""
 msgid "Remove %s (yes/no): "
 msgstr ""
 
-#: lxc/storage.go:1099
+#: lxc/storage.go:1176
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr ""
@@ -975,11 +1015,11 @@ msgstr ""
 msgid "Require user confirmation"
 msgstr ""
 
-#: lxc/info.go:156
+#: lxc/info.go:159
 msgid "Resources:"
 msgstr ""
 
-#: lxc/main.go:246
+#: lxc/main.go:247
 msgid "Restart containers."
 msgstr ""
 
@@ -987,7 +1027,7 @@ msgstr ""
 msgid "Retrieve the container's console log"
 msgstr ""
 
-#: lxc/init.go:289
+#: lxc/init.go:292
 #, c-format
 msgid "Retrieving image: %s"
 msgstr ""
@@ -1000,15 +1040,15 @@ msgstr ""
 msgid "SIZE"
 msgstr ""
 
-#: lxc/list.go:471
+#: lxc/list.go:475
 msgid "SNAPSHOTS"
 msgstr ""
 
-#: lxc/storage.go:694
+#: lxc/storage.go:717
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/list.go:472
+#: lxc/cluster.go:185 lxc/list.go:476 lxc/network.go:551 lxc/storage.go:715
 msgid "STATE"
 msgstr ""
 
@@ -1020,7 +1060,7 @@ msgstr ""
 msgid "STATUS"
 msgstr ""
 
-#: lxc/list.go:474
+#: lxc/list.go:478
 msgid "STORAGE POOL"
 msgstr ""
 
@@ -1072,11 +1112,11 @@ msgstr ""
 msgid "Show the resources available to the server"
 msgstr ""
 
-#: lxc/storage.go:155
+#: lxc/storage.go:156
 msgid "Show the resources available to the storage pool"
 msgstr ""
 
-#: lxc/storage.go:156
+#: lxc/storage.go:157
 msgid "Show the used and free space in bytes"
 msgstr ""
 
@@ -1085,7 +1125,7 @@ msgstr ""
 msgid "Size: %.2fMB"
 msgstr ""
 
-#: lxc/info.go:237
+#: lxc/info.go:240
 msgid "Snapshots:"
 msgstr ""
 
@@ -1098,7 +1138,7 @@ msgstr ""
 msgid "Source:"
 msgstr ""
 
-#: lxc/main.go:256
+#: lxc/main.go:257
 msgid "Start containers."
 msgstr ""
 
@@ -1107,12 +1147,12 @@ msgstr ""
 msgid "Starting %s"
 msgstr ""
 
-#: lxc/info.go:127
+#: lxc/info.go:130
 #, c-format
 msgid "Status: %s"
 msgstr ""
 
-#: lxc/main.go:262
+#: lxc/main.go:263
 msgid "Stop containers."
 msgstr ""
 
@@ -1129,26 +1169,31 @@ msgstr ""
 msgid "Stopping the container failed: %s"
 msgstr ""
 
-#: lxc/storage.go:506
+#: lxc/storage.go:517
 #, c-format
 msgid "Storage pool %s created"
 msgstr ""
 
-#: lxc/storage.go:565
+#: lxc/storage.go:577
 #, c-format
 msgid "Storage pool %s deleted"
 msgstr ""
 
-#: lxc/init.go:145 lxc/init.go:146
+#: lxc/storage.go:515
+#, c-format
+msgid "Storage pool %s pending on node %s"
+msgstr ""
+
+#: lxc/init.go:146 lxc/init.go:147
 msgid "Storage pool name"
 msgstr ""
 
-#: lxc/storage.go:914
+#: lxc/storage.go:959
 #, c-format
 msgid "Storage volume %s created"
 msgstr ""
 
-#: lxc/storage.go:929
+#: lxc/storage.go:979
 #, c-format
 msgid "Storage volume %s deleted"
 msgstr ""
@@ -1157,15 +1202,15 @@ msgstr ""
 msgid "Store the container state (only for stop)"
 msgstr ""
 
-#: lxc/info.go:198
+#: lxc/info.go:201
 msgid "Swap (current)"
 msgstr ""
 
-#: lxc/info.go:202
+#: lxc/info.go:205
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:473 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:879
+#: lxc/list.go:477 lxc/network.go:545 lxc/operation.go:153 lxc/storage.go:914
 msgid "TYPE"
 msgstr ""
 
@@ -1179,7 +1224,7 @@ msgid ""
 "restarted."
 msgstr ""
 
-#: lxc/init.go:362
+#: lxc/init.go:365
 msgid "The container you are starting doesn't have any network attached to it."
 msgstr ""
 
@@ -1192,7 +1237,7 @@ msgstr ""
 msgid "The device doesn't exist"
 msgstr ""
 
-#: lxc/init.go:346
+#: lxc/init.go:349
 #, c-format
 msgid "The local image '%s' couldn't be found, trying '%s:' instead."
 msgstr ""
@@ -1201,11 +1246,11 @@ msgstr ""
 msgid "The opposite of \"lxc pause\" is \"lxc start\"."
 msgstr ""
 
-#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:426 lxc/storage.go:546
+#: lxc/network.go:312 lxc/network.go:365 lxc/storage.go:428 lxc/storage.go:558
 msgid "The specified device doesn't exist"
 msgstr ""
 
-#: lxc/network.go:302 lxc/network.go:355
+#: lxc/network.go:316 lxc/network.go:369
 msgid "The specified device doesn't match the network"
 msgstr ""
 
@@ -1229,11 +1274,11 @@ msgstr ""
 msgid "Timestamps:"
 msgstr ""
 
-#: lxc/init.go:364
+#: lxc/init.go:367
 msgid "To attach a network to a container, use: lxc network attach"
 msgstr ""
 
-#: lxc/init.go:363
+#: lxc/init.go:366
 msgid "To create a new network, use: lxc network create"
 msgstr ""
 
@@ -1264,11 +1309,11 @@ msgstr ""
 msgid "Try `lxc info --show-log %s` for more info"
 msgstr ""
 
-#: lxc/info.go:129
+#: lxc/info.go:132
 msgid "Type: ephemeral"
 msgstr ""
 
-#: lxc/info.go:131
+#: lxc/info.go:134
 msgid "Type: persistent"
 msgstr ""
 
@@ -1276,11 +1321,11 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/remote.go:410
+#: lxc/cluster.go:183 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:695 lxc/storage.go:882
+#: lxc/network.go:548 lxc/profile.go:574 lxc/storage.go:719 lxc/storage.go:917
 msgid "USED BY"
 msgstr ""
 
@@ -1310,6 +1355,25 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
+#: lxc/cluster.go:22
+msgid ""
+"Usage: lxc cluster <subcommand> [options]\n"
+"\n"
+"Manage cluster nodes.\n"
+"\n"
+"lxc cluster list [<remote>:]\n"
+"    List all nodes in the cluster.\n"
+"\n"
+"lxc cluster show [<remote>:]<node>\n"
+"    Show details of a node.\n"
+"\n"
+"lxc cluster rename [<remote>:]<node> <new-name>\n"
+"    Rename a cluster node.\n"
+"\n"
+"lxc cluster delete [<remote>:]<node> [--force]\n"
+"    Delete a node from the cluster."
+msgstr ""
+
 #: lxc/config.go:85
 msgid ""
 "Usage: lxc config <subcommand> [options]\n"
@@ -1626,11 +1690,12 @@ msgid ""
 "    For LXD server information."
 msgstr ""
 
-#: lxc/init.go:77
+#: lxc/init.go:78
 msgid ""
 "Usage: lxc init [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create containers from images.\n"
 "\n"
@@ -1645,7 +1710,8 @@ msgstr ""
 msgid ""
 "Usage: lxc launch [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create and start containers from images.\n"
 "\n"
@@ -1726,6 +1792,8 @@ msgid ""
 "\n"
 "\tt - Type (persistent or ephemeral)\n"
 "\n"
+"\tH - Node hosting the container\n"
+"\n"
 "Custom columns are defined with \"key[:name][:maxWidth]\":\n"
 "\n"
 "\tKEY: The (extended) config key to display\n"
@@ -1791,7 +1859,7 @@ msgid ""
 "    Rename a snapshot."
 msgstr ""
 
-#: lxc/network.go:50
+#: lxc/network.go:52
 msgid ""
 "Usage: lxc network <subcommand> [options]\n"
 "\n"
@@ -1800,13 +1868,13 @@ msgid ""
 "lxc network list [<remote>:]\n"
 "    List available networks.\n"
 "\n"
-"lxc network show [<remote>:]<network>\n"
+"lxc network show [<remote>:]<network> [--target <node>]\n"
 "    Show details of a network.\n"
 "\n"
-"lxc network create [<remote>:]<network> [key=value...]\n"
+"lxc network create [<remote>:]<network> [key=value...] [--target <node>]\n"
 "    Create a network.\n"
 "\n"
-"lxc network get [<remote>:]<network> <key>\n"
+"lxc network get [<remote>:]<network> <key> [--target <node>]\n"
 "    Get network configuration.\n"
 "\n"
 "lxc network set [<remote>:]<network> <key> <value>\n"
@@ -2039,7 +2107,7 @@ msgid ""
 "    Create a snapshot of \"u1\" called \"snap0\"."
 msgstr ""
 
-#: lxc/storage.go:65
+#: lxc/storage.go:66
 msgid ""
 "Usage: lxc storage <subcommand> [options]\n"
 "\n"
@@ -2049,13 +2117,14 @@ msgid ""
 "lxc storage list [<remote>:]\n"
 "    List available storage pools.\n"
 "\n"
-"lxc storage show [<remote>:]<pool> [--resources]\n"
+"lxc storage show [<remote>:]<pool> [--resources] [--target <node>]\n"
 "    Show details of a storage pool.\n"
 "\n"
 "lxc storage info [<remote>:]<pool> [--bytes]\n"
 "    Show information of a storage pool in yaml format.\n"
 "\n"
-"lxc storage create [<remote>:]<pool> <driver> [key=value]...\n"
+"lxc storage create [<remote>:]<pool> <driver> [key=value]... [--target "
+"<node>]\n"
 "    Create a storage pool.\n"
 "\n"
 "lxc storage get [<remote>:]<pool> <key>\n"
@@ -2078,28 +2147,31 @@ msgid ""
 "lxc storage volume list [<remote>:]<pool>\n"
 "    List available storage volumes on a storage pool.\n"
 "\n"
-"lxc storage volume show [<remote>:]<pool> <volume>\n"
-"    Show details of a storage volume on a storage pool.\n"
+"lxc storage volume show [<remote>:]<pool> <volume> [--target <node>]\n"
+"   Show details of a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume create [<remote>:]<pool> <volume> [key=value]...\n"
+"lxc storage volume create [<remote>:]<pool> <volume> [key=value]... [--"
+"target <node>]\n"
 "    Create a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume rename [<remote>:]<pool> <old name> <new name>\n"
+"lxc storage volume rename [<remote>:]<pool> <old name> <new name> [--target "
+"<node>]\n"
 "    Rename a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume get [<remote>:]<pool> <volume> <key>\n"
+"lxc storage volume get [<remote>:]<pool> <volume> <key> [--target <node>]\n"
 "    Get storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume set [<remote>:]<pool> <volume> <key> <value>\n"
+"lxc storage volume set [<remote>:]<pool> <volume> <key> <value> [--target "
+"<node>]\n"
 "    Set storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume unset [<remote>:]<pool> <volume> <key>\n"
+"lxc storage volume unset [<remote>:]<pool> <volume> <key> [--target <node>]\n"
 "    Unset storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume delete [<remote>:]<pool> <volume>\n"
+"lxc storage volume delete [<remote>:]<pool> <volume> [--target <node>]\n"
 "    Delete a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume edit [<remote>:]<pool> <volume>\n"
+"lxc storage volume edit [<remote>:]<pool> <volume> [--target <node>]\n"
 "    Edit storage volume, either by launching external editor or reading "
 "STDIN.\n"
 "\n"
@@ -2149,7 +2221,7 @@ msgstr ""
 msgid "User aborted delete operation."
 msgstr ""
 
-#: lxc/utils.go:371
+#: lxc/utils.go:372
 msgid ""
 "User signaled us three times, exiting. The remote operation will keep "
 "running."
@@ -2169,7 +2241,7 @@ msgstr ""
 msgid "Whether or not to snapshot the container's running state"
 msgstr ""
 
-#: lxc/network.go:510 lxc/operation.go:141 lxc/remote.go:382 lxc/remote.go:387
+#: lxc/network.go:528 lxc/operation.go:141 lxc/remote.go:382 lxc/remote.go:387
 msgid "YES"
 msgstr ""
 
@@ -2201,11 +2273,11 @@ msgstr ""
 msgid "default"
 msgstr ""
 
-#: lxc/storage.go:804
+#: lxc/storage.go:835
 msgid "description"
 msgstr ""
 
-#: lxc/init.go:311
+#: lxc/init.go:314
 msgid "didn't get any affected image, container or snapshot from server"
 msgstr ""
 
@@ -2213,7 +2285,7 @@ msgstr ""
 msgid "disabled"
 msgstr ""
 
-#: lxc/storage.go:803
+#: lxc/storage.go:834
 msgid "driver"
 msgstr ""
 
@@ -2231,11 +2303,11 @@ msgstr ""
 msgid "error: unknown command: %s"
 msgstr ""
 
-#: lxc/storage.go:801
+#: lxc/storage.go:832
 msgid "info"
 msgstr ""
 
-#: lxc/storage.go:802
+#: lxc/storage.go:833
 msgid "name"
 msgstr ""
 
@@ -2247,7 +2319,7 @@ msgstr ""
 msgid "ok (y/n)?"
 msgstr ""
 
-#: lxc/main.go:366 lxc/main.go:370
+#: lxc/main.go:367 lxc/main.go:371
 #, c-format
 msgid "processing aliases failed %s\n"
 msgstr ""
@@ -2276,32 +2348,32 @@ msgstr ""
 msgid "remote %s is static and cannot be modified"
 msgstr ""
 
-#: lxc/storage.go:806
+#: lxc/storage.go:837
 msgid "space used"
 msgstr ""
 
-#: lxc/info.go:248
+#: lxc/info.go:251
 msgid "stateful"
 msgstr ""
 
-#: lxc/info.go:250
+#: lxc/info.go:253
 msgid "stateless"
 msgstr ""
 
-#: lxc/info.go:244
+#: lxc/info.go:247
 #, c-format
 msgid "taken at %s"
 msgstr ""
 
-#: lxc/storage.go:805
+#: lxc/storage.go:836
 msgid "total space"
 msgstr ""
 
-#: lxc/storage.go:800
+#: lxc/storage.go:831
 msgid "used by"
 msgstr ""
 
-#: lxc/main.go:297
+#: lxc/main.go:298
 msgid "wrong number of subcommand arguments"
 msgstr ""
 
diff --git a/po/nl.po b/po/nl.po
index 2b2c6fb13..3a781f83d 100644
--- a/po/nl.po
+++ b/po/nl.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2018-01-21 21:48+0000\n"
+"POT-Creation-Date: 2018-01-24 13:34+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -16,7 +16,7 @@ msgstr ""
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
 
-#: lxc/storage.go:34
+#: lxc/storage.go:35
 msgid ""
 "### This is a yaml representation of a storage pool.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -33,7 +33,7 @@ msgid ""
 "###   zfs.pool_name: default"
 msgstr ""
 
-#: lxc/storage.go:51
+#: lxc/storage.go:52
 msgid ""
 "### This is a yaml representation of a storage volume.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -102,7 +102,7 @@ msgid ""
 "###  description: My custom image"
 msgstr ""
 
-#: lxc/network.go:30
+#: lxc/network.go:32
 msgid ""
 "### This is a yaml representation of the network.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -153,7 +153,7 @@ msgstr ""
 msgid "%s is not a directory"
 msgstr ""
 
-#: lxc/utils.go:375
+#: lxc/utils.go:376
 #, c-format
 msgid "%v (interrupt two more times to force)"
 msgstr ""
@@ -179,7 +179,7 @@ msgstr ""
 msgid "ARCH"
 msgstr ""
 
-#: lxc/list.go:463
+#: lxc/list.go:467
 msgid "ARCHITECTURE"
 msgstr ""
 
@@ -204,7 +204,7 @@ msgstr ""
 msgid "Aliases:"
 msgstr ""
 
-#: lxc/image.go:600 lxc/info.go:122
+#: lxc/image.go:600 lxc/info.go:125
 #, c-format
 msgid "Architecture: %s"
 msgstr ""
@@ -228,11 +228,11 @@ msgstr ""
 msgid "Both --all and container name given"
 msgstr ""
 
-#: lxc/info.go:215
+#: lxc/info.go:218
 msgid "Bytes received"
 msgstr ""
 
-#: lxc/info.go:216
+#: lxc/info.go:219
 msgid "Bytes sent"
 msgstr ""
 
@@ -244,11 +244,11 @@ msgstr ""
 msgid "COMMON NAME"
 msgstr ""
 
-#: lxc/info.go:179
+#: lxc/info.go:182
 msgid "CPU usage (in seconds)"
 msgstr ""
 
-#: lxc/info.go:183
+#: lxc/info.go:186
 msgid "CPU usage:"
 msgstr ""
 
@@ -256,7 +256,7 @@ msgstr ""
 msgid "CREATED"
 msgstr ""
 
-#: lxc/list.go:464
+#: lxc/list.go:468
 msgid "CREATED AT"
 msgstr ""
 
@@ -269,7 +269,7 @@ msgstr ""
 msgid "Can't pull a directory without --recursive"
 msgstr ""
 
-#: lxc/config.go:207 lxc/network.go:560
+#: lxc/config.go:207 lxc/network.go:587
 #, c-format
 msgid "Can't read from stdin: %s"
 msgstr ""
@@ -288,7 +288,7 @@ msgstr ""
 msgid "Can't unset key '%s', it's not currently set."
 msgstr ""
 
-#: lxc/profile.go:546 lxc/storage.go:663
+#: lxc/profile.go:546 lxc/storage.go:675
 msgid "Cannot provide container name to list"
 msgstr ""
 
@@ -301,7 +301,7 @@ msgstr ""
 msgid "Client certificate stored at server: "
 msgstr ""
 
-#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:130 lxc/list.go:131
+#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:134 lxc/list.go:135
 msgid "Columns"
 msgstr ""
 
@@ -309,12 +309,12 @@ msgstr ""
 msgid "Commands:"
 msgstr ""
 
-#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:137 lxc/init.go:138
+#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:138 lxc/init.go:139
 msgid "Config key/value to apply to the new container"
 msgstr ""
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:614 lxc/storage.go:1067
+#: lxc/network.go:440 lxc/profile.go:275 lxc/storage.go:626 lxc/storage.go:1138
 #, c-format
 msgid "Config parsing error: %s"
 msgstr ""
@@ -331,7 +331,7 @@ msgstr ""
 msgid "Container name is mandatory"
 msgstr ""
 
-#: lxc/copy.go:229 lxc/init.go:317
+#: lxc/copy.go:229 lxc/init.go:320
 #, c-format
 msgid "Container name is: %s"
 msgstr ""
@@ -366,26 +366,30 @@ msgstr ""
 msgid "Create any directories necessary"
 msgstr ""
 
-#: lxc/image.go:605 lxc/info.go:124
+#: lxc/image.go:605 lxc/info.go:127
 #, c-format
 msgid "Created: %s"
 msgstr ""
 
-#: lxc/init.go:196
+#: lxc/init.go:199
 #, c-format
 msgid "Creating %s"
 msgstr ""
 
-#: lxc/init.go:194
+#: lxc/init.go:197
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:465 lxc/network.go:525
-#: lxc/storage.go:692 lxc/storage.go:881
+#: lxc/cluster.go:184
+msgid "DATABASE"
+msgstr ""
+
+#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:469 lxc/network.go:547
+#: lxc/storage.go:711 lxc/storage.go:916
 msgid "DESCRIPTION"
 msgstr ""
 
-#: lxc/storage.go:693
+#: lxc/storage.go:712
 msgid "DRIVER"
 msgstr ""
 
@@ -403,7 +407,7 @@ msgstr ""
 msgid "Device %s removed from %s"
 msgstr ""
 
-#: lxc/utils.go:307 lxc/utils.go:331
+#: lxc/utils.go:308 lxc/utils.go:332
 #, c-format
 msgid "Device already exists: %s"
 msgstr ""
@@ -420,11 +424,11 @@ msgstr ""
 msgid "Disable stdin (reads from /dev/null)"
 msgstr ""
 
-#: lxc/info.go:172
+#: lxc/info.go:175
 msgid "Disk usage:"
 msgstr ""
 
-#: lxc/list.go:622
+#: lxc/list.go:638
 msgid "EPHEMERAL"
 msgstr ""
 
@@ -448,7 +452,7 @@ msgstr ""
 msgid "Environment:"
 msgstr ""
 
-#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:141 lxc/init.go:142
+#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:142 lxc/init.go:143
 msgid "Ephemeral container"
 msgstr ""
 
@@ -483,7 +487,7 @@ msgstr ""
 msgid "FINGERPRINT"
 msgstr ""
 
-#: lxc/utils.go:413
+#: lxc/utils.go:414
 #, c-format
 msgid "Failed to create alias %s"
 msgstr ""
@@ -502,7 +506,7 @@ msgstr ""
 msgid "Failed to get the new container name"
 msgstr ""
 
-#: lxc/utils.go:403
+#: lxc/utils.go:404
 #, c-format
 msgid "Failed to remove alias %s"
 msgstr ""
@@ -512,11 +516,11 @@ msgstr ""
 msgid "Failed to walk path for %s: %s"
 msgstr ""
 
-#: lxc/list.go:133
+#: lxc/list.go:137
 msgid "Fast mode (same as --columns=nsacPt)"
 msgstr ""
 
-#: lxc/network.go:486 lxc/operation.go:121
+#: lxc/network.go:504 lxc/operation.go:121
 msgid "Filtering isn't supported yet"
 msgstr ""
 
@@ -529,6 +533,10 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
+#: lxc/cluster.go:41
+msgid "Force removing a node, even if degraded"
+msgstr ""
+
 #: lxc/action.go:47 lxc/action.go:48
 msgid "Force the container to shutdown"
 msgstr ""
@@ -541,7 +549,7 @@ msgstr ""
 msgid "Force using the local unix socket"
 msgstr ""
 
-#: lxc/image.go:180 lxc/list.go:132
+#: lxc/image.go:180 lxc/list.go:136
 msgid "Format (csv|json|table|yaml)"
 msgstr ""
 
@@ -553,11 +561,11 @@ msgstr ""
 msgid "ID"
 msgstr ""
 
-#: lxc/list.go:461
+#: lxc/list.go:465
 msgid "IPV4"
 msgstr ""
 
-#: lxc/list.go:462
+#: lxc/list.go:466
 msgid "IPV6"
 msgstr ""
 
@@ -604,7 +612,7 @@ msgstr ""
 msgid "Input data"
 msgstr ""
 
-#: lxc/init.go:147
+#: lxc/init.go:148
 msgid "Instance type"
 msgstr ""
 
@@ -641,7 +649,7 @@ msgstr ""
 msgid "Invalid target %s"
 msgstr ""
 
-#: lxc/info.go:153
+#: lxc/info.go:156
 msgid "Ips:"
 msgstr ""
 
@@ -649,7 +657,7 @@ msgstr ""
 msgid "Keep the image up to date after initial copy"
 msgstr ""
 
-#: lxc/list.go:466
+#: lxc/list.go:470
 msgid "LAST USED AT"
 msgstr ""
 
@@ -666,14 +674,18 @@ msgstr ""
 msgid "Last used: never"
 msgstr ""
 
-#: lxc/info.go:268
+#: lxc/info.go:271
 msgid "Log:"
 msgstr ""
 
-#: lxc/network.go:524
+#: lxc/network.go:546
 msgid "MANAGED"
 msgstr ""
 
+#: lxc/cluster.go:186
+msgid "MESSAGE"
+msgstr ""
+
 #: lxc/image.go:176
 msgid "Make image public"
 msgstr ""
@@ -682,23 +694,23 @@ msgstr ""
 msgid "Make the image public"
 msgstr ""
 
-#: lxc/info.go:190
+#: lxc/info.go:193
 msgid "Memory (current)"
 msgstr ""
 
-#: lxc/info.go:194
+#: lxc/info.go:197
 msgid "Memory (peak)"
 msgstr ""
 
-#: lxc/info.go:206
+#: lxc/info.go:209
 msgid "Memory usage:"
 msgstr ""
 
-#: lxc/utils.go:258
+#: lxc/utils.go:259
 msgid "Missing summary."
 msgstr ""
 
-#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:412 lxc/storage.go:532
+#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:414 lxc/storage.go:544
 msgid "More than one device matches, specify the device name."
 msgstr ""
 
@@ -718,40 +730,49 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/list.go:467 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409
-#: lxc/storage.go:691 lxc/storage.go:880
+#: lxc/cluster.go:182 lxc/list.go:471 lxc/network.go:544 lxc/profile.go:573
+#: lxc/remote.go:409 lxc/storage.go:710 lxc/storage.go:915
 msgid "NAME"
 msgstr ""
 
-#: lxc/network.go:508 lxc/operation.go:139 lxc/remote.go:380 lxc/remote.go:385
+#: lxc/network.go:526 lxc/operation.go:139 lxc/remote.go:380 lxc/remote.go:385
 msgid "NO"
 msgstr ""
 
+#: lxc/list.go:492 lxc/storage.go:920
+msgid "NODE"
+msgstr ""
+
 #: lxc/info.go:117
 #, c-format
 msgid "Name: %s"
 msgstr ""
 
-#: lxc/network.go:258
+#: lxc/network.go:271
 #, c-format
 msgid "Network %s created"
 msgstr ""
 
-#: lxc/network.go:374
+#: lxc/network.go:388
 #, c-format
 msgid "Network %s deleted"
 msgstr ""
 
-#: lxc/network.go:451
+#: lxc/network.go:269
+#, c-format
+msgid "Network %s pending on node %s"
+msgstr ""
+
+#: lxc/network.go:465
 #, c-format
 msgid "Network %s renamed to %s"
 msgstr ""
 
-#: lxc/init.go:143 lxc/init.go:144
+#: lxc/init.go:144 lxc/init.go:145
 msgid "Network name"
 msgstr ""
 
-#: lxc/info.go:223
+#: lxc/info.go:226
 msgid "Network usage:"
 msgstr ""
 
@@ -763,11 +784,11 @@ msgstr ""
 msgid "No certificate provided to add"
 msgstr ""
 
-#: lxc/network.go:293 lxc/network.go:346
+#: lxc/network.go:307 lxc/network.go:360
 msgid "No device found for this network"
 msgstr ""
 
-#: lxc/storage.go:421 lxc/storage.go:541
+#: lxc/storage.go:423 lxc/storage.go:553
 msgid "No device found for this storage volume."
 msgstr ""
 
@@ -775,7 +796,26 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/storage.go:365 lxc/storage.go:458
+#: lxc/cluster.go:142
+#, c-format
+msgid "Node %s removed"
+msgstr ""
+
+#: lxc/cluster.go:118
+#, c-format
+msgid "Node %s renamed to %s"
+msgstr ""
+
+#: lxc/init.go:149 lxc/network.go:102 lxc/storage.go:158
+msgid "Node name"
+msgstr ""
+
+#: lxc/info.go:119
+#, c-format
+msgid "Node: %s"
+msgstr ""
+
+#: lxc/storage.go:367 lxc/storage.go:460
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
 
@@ -787,7 +827,7 @@ msgstr ""
 msgid "Only https:// is supported for remote image import."
 msgstr ""
 
-#: lxc/network.go:402 lxc/network.go:546
+#: lxc/network.go:416 lxc/network.go:573
 msgid "Only managed networks can be modified."
 msgstr ""
 
@@ -804,19 +844,19 @@ msgstr ""
 msgid "Override the terminal mode (auto, interactive or non-interactive)"
 msgstr ""
 
-#: lxc/list.go:624
+#: lxc/list.go:640
 msgid "PERSISTENT"
 msgstr ""
 
-#: lxc/list.go:469
+#: lxc/list.go:473
 msgid "PID"
 msgstr ""
 
-#: lxc/list.go:468
+#: lxc/list.go:472
 msgid "PROCESSES"
 msgstr ""
 
-#: lxc/list.go:470
+#: lxc/list.go:474
 msgid "PROFILES"
 msgstr ""
 
@@ -828,11 +868,11 @@ msgstr ""
 msgid "PUBLIC"
 msgstr ""
 
-#: lxc/info.go:217
+#: lxc/info.go:220
 msgid "Packets received"
 msgstr ""
 
-#: lxc/info.go:218
+#: lxc/info.go:221
 msgid "Packets sent"
 msgstr ""
 
@@ -844,7 +884,7 @@ msgstr ""
 msgid "Path to an alternate server directory"
 msgstr ""
 
-#: lxc/main.go:238
+#: lxc/main.go:239
 msgid "Pause containers."
 msgstr ""
 
@@ -852,12 +892,12 @@ msgstr ""
 msgid "Permission denied, are you in the lxd group?"
 msgstr ""
 
-#: lxc/info.go:135
+#: lxc/info.go:138
 #, c-format
 msgid "Pid: %d"
 msgstr ""
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:615 lxc/storage.go:1068
+#: lxc/network.go:441 lxc/profile.go:276 lxc/storage.go:627 lxc/storage.go:1139
 msgid "Press enter to open the editor again"
 msgstr ""
 
@@ -882,7 +922,7 @@ msgstr ""
 msgid "Print verbose information"
 msgstr ""
 
-#: lxc/info.go:159
+#: lxc/info.go:162
 #, c-format
 msgid "Processes: %d"
 msgstr ""
@@ -912,7 +952,7 @@ msgstr ""
 msgid "Profile %s renamed to %s"
 msgstr ""
 
-#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:139 lxc/init.go:140
+#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:140 lxc/init.go:141
 msgid "Profile to apply to the new container"
 msgstr ""
 
@@ -921,7 +961,7 @@ msgstr ""
 msgid "Profiles %s applied to %s"
 msgstr ""
 
-#: lxc/info.go:133
+#: lxc/info.go:136
 #, c-format
 msgid "Profiles: %s"
 msgstr ""
@@ -952,11 +992,11 @@ msgstr ""
 msgid "Remote admin password"
 msgstr ""
 
-#: lxc/utils.go:366
+#: lxc/utils.go:367
 msgid "Remote operation canceled by user"
 msgstr ""
 
-#: lxc/info.go:119
+#: lxc/info.go:122
 #, c-format
 msgid "Remote: %s"
 msgstr ""
@@ -966,7 +1006,7 @@ msgstr ""
 msgid "Remove %s (yes/no): "
 msgstr ""
 
-#: lxc/storage.go:1099
+#: lxc/storage.go:1176
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr ""
@@ -975,11 +1015,11 @@ msgstr ""
 msgid "Require user confirmation"
 msgstr ""
 
-#: lxc/info.go:156
+#: lxc/info.go:159
 msgid "Resources:"
 msgstr ""
 
-#: lxc/main.go:246
+#: lxc/main.go:247
 msgid "Restart containers."
 msgstr ""
 
@@ -987,7 +1027,7 @@ msgstr ""
 msgid "Retrieve the container's console log"
 msgstr ""
 
-#: lxc/init.go:289
+#: lxc/init.go:292
 #, c-format
 msgid "Retrieving image: %s"
 msgstr ""
@@ -1000,15 +1040,15 @@ msgstr ""
 msgid "SIZE"
 msgstr ""
 
-#: lxc/list.go:471
+#: lxc/list.go:475
 msgid "SNAPSHOTS"
 msgstr ""
 
-#: lxc/storage.go:694
+#: lxc/storage.go:717
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/list.go:472
+#: lxc/cluster.go:185 lxc/list.go:476 lxc/network.go:551 lxc/storage.go:715
 msgid "STATE"
 msgstr ""
 
@@ -1020,7 +1060,7 @@ msgstr ""
 msgid "STATUS"
 msgstr ""
 
-#: lxc/list.go:474
+#: lxc/list.go:478
 msgid "STORAGE POOL"
 msgstr ""
 
@@ -1072,11 +1112,11 @@ msgstr ""
 msgid "Show the resources available to the server"
 msgstr ""
 
-#: lxc/storage.go:155
+#: lxc/storage.go:156
 msgid "Show the resources available to the storage pool"
 msgstr ""
 
-#: lxc/storage.go:156
+#: lxc/storage.go:157
 msgid "Show the used and free space in bytes"
 msgstr ""
 
@@ -1085,7 +1125,7 @@ msgstr ""
 msgid "Size: %.2fMB"
 msgstr ""
 
-#: lxc/info.go:237
+#: lxc/info.go:240
 msgid "Snapshots:"
 msgstr ""
 
@@ -1098,7 +1138,7 @@ msgstr ""
 msgid "Source:"
 msgstr ""
 
-#: lxc/main.go:256
+#: lxc/main.go:257
 msgid "Start containers."
 msgstr ""
 
@@ -1107,12 +1147,12 @@ msgstr ""
 msgid "Starting %s"
 msgstr ""
 
-#: lxc/info.go:127
+#: lxc/info.go:130
 #, c-format
 msgid "Status: %s"
 msgstr ""
 
-#: lxc/main.go:262
+#: lxc/main.go:263
 msgid "Stop containers."
 msgstr ""
 
@@ -1129,26 +1169,31 @@ msgstr ""
 msgid "Stopping the container failed: %s"
 msgstr ""
 
-#: lxc/storage.go:506
+#: lxc/storage.go:517
 #, c-format
 msgid "Storage pool %s created"
 msgstr ""
 
-#: lxc/storage.go:565
+#: lxc/storage.go:577
 #, c-format
 msgid "Storage pool %s deleted"
 msgstr ""
 
-#: lxc/init.go:145 lxc/init.go:146
+#: lxc/storage.go:515
+#, c-format
+msgid "Storage pool %s pending on node %s"
+msgstr ""
+
+#: lxc/init.go:146 lxc/init.go:147
 msgid "Storage pool name"
 msgstr ""
 
-#: lxc/storage.go:914
+#: lxc/storage.go:959
 #, c-format
 msgid "Storage volume %s created"
 msgstr ""
 
-#: lxc/storage.go:929
+#: lxc/storage.go:979
 #, c-format
 msgid "Storage volume %s deleted"
 msgstr ""
@@ -1157,15 +1202,15 @@ msgstr ""
 msgid "Store the container state (only for stop)"
 msgstr ""
 
-#: lxc/info.go:198
+#: lxc/info.go:201
 msgid "Swap (current)"
 msgstr ""
 
-#: lxc/info.go:202
+#: lxc/info.go:205
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:473 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:879
+#: lxc/list.go:477 lxc/network.go:545 lxc/operation.go:153 lxc/storage.go:914
 msgid "TYPE"
 msgstr ""
 
@@ -1179,7 +1224,7 @@ msgid ""
 "restarted."
 msgstr ""
 
-#: lxc/init.go:362
+#: lxc/init.go:365
 msgid "The container you are starting doesn't have any network attached to it."
 msgstr ""
 
@@ -1192,7 +1237,7 @@ msgstr ""
 msgid "The device doesn't exist"
 msgstr ""
 
-#: lxc/init.go:346
+#: lxc/init.go:349
 #, c-format
 msgid "The local image '%s' couldn't be found, trying '%s:' instead."
 msgstr ""
@@ -1201,11 +1246,11 @@ msgstr ""
 msgid "The opposite of \"lxc pause\" is \"lxc start\"."
 msgstr ""
 
-#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:426 lxc/storage.go:546
+#: lxc/network.go:312 lxc/network.go:365 lxc/storage.go:428 lxc/storage.go:558
 msgid "The specified device doesn't exist"
 msgstr ""
 
-#: lxc/network.go:302 lxc/network.go:355
+#: lxc/network.go:316 lxc/network.go:369
 msgid "The specified device doesn't match the network"
 msgstr ""
 
@@ -1229,11 +1274,11 @@ msgstr ""
 msgid "Timestamps:"
 msgstr ""
 
-#: lxc/init.go:364
+#: lxc/init.go:367
 msgid "To attach a network to a container, use: lxc network attach"
 msgstr ""
 
-#: lxc/init.go:363
+#: lxc/init.go:366
 msgid "To create a new network, use: lxc network create"
 msgstr ""
 
@@ -1264,11 +1309,11 @@ msgstr ""
 msgid "Try `lxc info --show-log %s` for more info"
 msgstr ""
 
-#: lxc/info.go:129
+#: lxc/info.go:132
 msgid "Type: ephemeral"
 msgstr ""
 
-#: lxc/info.go:131
+#: lxc/info.go:134
 msgid "Type: persistent"
 msgstr ""
 
@@ -1276,11 +1321,11 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/remote.go:410
+#: lxc/cluster.go:183 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:695 lxc/storage.go:882
+#: lxc/network.go:548 lxc/profile.go:574 lxc/storage.go:719 lxc/storage.go:917
 msgid "USED BY"
 msgstr ""
 
@@ -1310,6 +1355,25 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
+#: lxc/cluster.go:22
+msgid ""
+"Usage: lxc cluster <subcommand> [options]\n"
+"\n"
+"Manage cluster nodes.\n"
+"\n"
+"lxc cluster list [<remote>:]\n"
+"    List all nodes in the cluster.\n"
+"\n"
+"lxc cluster show [<remote>:]<node>\n"
+"    Show details of a node.\n"
+"\n"
+"lxc cluster rename [<remote>:]<node> <new-name>\n"
+"    Rename a cluster node.\n"
+"\n"
+"lxc cluster delete [<remote>:]<node> [--force]\n"
+"    Delete a node from the cluster."
+msgstr ""
+
 #: lxc/config.go:85
 msgid ""
 "Usage: lxc config <subcommand> [options]\n"
@@ -1626,11 +1690,12 @@ msgid ""
 "    For LXD server information."
 msgstr ""
 
-#: lxc/init.go:77
+#: lxc/init.go:78
 msgid ""
 "Usage: lxc init [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create containers from images.\n"
 "\n"
@@ -1645,7 +1710,8 @@ msgstr ""
 msgid ""
 "Usage: lxc launch [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create and start containers from images.\n"
 "\n"
@@ -1726,6 +1792,8 @@ msgid ""
 "\n"
 "\tt - Type (persistent or ephemeral)\n"
 "\n"
+"\tH - Node hosting the container\n"
+"\n"
 "Custom columns are defined with \"key[:name][:maxWidth]\":\n"
 "\n"
 "\tKEY: The (extended) config key to display\n"
@@ -1791,7 +1859,7 @@ msgid ""
 "    Rename a snapshot."
 msgstr ""
 
-#: lxc/network.go:50
+#: lxc/network.go:52
 msgid ""
 "Usage: lxc network <subcommand> [options]\n"
 "\n"
@@ -1800,13 +1868,13 @@ msgid ""
 "lxc network list [<remote>:]\n"
 "    List available networks.\n"
 "\n"
-"lxc network show [<remote>:]<network>\n"
+"lxc network show [<remote>:]<network> [--target <node>]\n"
 "    Show details of a network.\n"
 "\n"
-"lxc network create [<remote>:]<network> [key=value...]\n"
+"lxc network create [<remote>:]<network> [key=value...] [--target <node>]\n"
 "    Create a network.\n"
 "\n"
-"lxc network get [<remote>:]<network> <key>\n"
+"lxc network get [<remote>:]<network> <key> [--target <node>]\n"
 "    Get network configuration.\n"
 "\n"
 "lxc network set [<remote>:]<network> <key> <value>\n"
@@ -2039,7 +2107,7 @@ msgid ""
 "    Create a snapshot of \"u1\" called \"snap0\"."
 msgstr ""
 
-#: lxc/storage.go:65
+#: lxc/storage.go:66
 msgid ""
 "Usage: lxc storage <subcommand> [options]\n"
 "\n"
@@ -2049,13 +2117,14 @@ msgid ""
 "lxc storage list [<remote>:]\n"
 "    List available storage pools.\n"
 "\n"
-"lxc storage show [<remote>:]<pool> [--resources]\n"
+"lxc storage show [<remote>:]<pool> [--resources] [--target <node>]\n"
 "    Show details of a storage pool.\n"
 "\n"
 "lxc storage info [<remote>:]<pool> [--bytes]\n"
 "    Show information of a storage pool in yaml format.\n"
 "\n"
-"lxc storage create [<remote>:]<pool> <driver> [key=value]...\n"
+"lxc storage create [<remote>:]<pool> <driver> [key=value]... [--target "
+"<node>]\n"
 "    Create a storage pool.\n"
 "\n"
 "lxc storage get [<remote>:]<pool> <key>\n"
@@ -2078,28 +2147,31 @@ msgid ""
 "lxc storage volume list [<remote>:]<pool>\n"
 "    List available storage volumes on a storage pool.\n"
 "\n"
-"lxc storage volume show [<remote>:]<pool> <volume>\n"
-"    Show details of a storage volume on a storage pool.\n"
+"lxc storage volume show [<remote>:]<pool> <volume> [--target <node>]\n"
+"   Show details of a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume create [<remote>:]<pool> <volume> [key=value]...\n"
+"lxc storage volume create [<remote>:]<pool> <volume> [key=value]... [--"
+"target <node>]\n"
 "    Create a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume rename [<remote>:]<pool> <old name> <new name>\n"
+"lxc storage volume rename [<remote>:]<pool> <old name> <new name> [--target "
+"<node>]\n"
 "    Rename a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume get [<remote>:]<pool> <volume> <key>\n"
+"lxc storage volume get [<remote>:]<pool> <volume> <key> [--target <node>]\n"
 "    Get storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume set [<remote>:]<pool> <volume> <key> <value>\n"
+"lxc storage volume set [<remote>:]<pool> <volume> <key> <value> [--target "
+"<node>]\n"
 "    Set storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume unset [<remote>:]<pool> <volume> <key>\n"
+"lxc storage volume unset [<remote>:]<pool> <volume> <key> [--target <node>]\n"
 "    Unset storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume delete [<remote>:]<pool> <volume>\n"
+"lxc storage volume delete [<remote>:]<pool> <volume> [--target <node>]\n"
 "    Delete a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume edit [<remote>:]<pool> <volume>\n"
+"lxc storage volume edit [<remote>:]<pool> <volume> [--target <node>]\n"
 "    Edit storage volume, either by launching external editor or reading "
 "STDIN.\n"
 "\n"
@@ -2149,7 +2221,7 @@ msgstr ""
 msgid "User aborted delete operation."
 msgstr ""
 
-#: lxc/utils.go:371
+#: lxc/utils.go:372
 msgid ""
 "User signaled us three times, exiting. The remote operation will keep "
 "running."
@@ -2169,7 +2241,7 @@ msgstr ""
 msgid "Whether or not to snapshot the container's running state"
 msgstr ""
 
-#: lxc/network.go:510 lxc/operation.go:141 lxc/remote.go:382 lxc/remote.go:387
+#: lxc/network.go:528 lxc/operation.go:141 lxc/remote.go:382 lxc/remote.go:387
 msgid "YES"
 msgstr ""
 
@@ -2201,11 +2273,11 @@ msgstr ""
 msgid "default"
 msgstr ""
 
-#: lxc/storage.go:804
+#: lxc/storage.go:835
 msgid "description"
 msgstr ""
 
-#: lxc/init.go:311
+#: lxc/init.go:314
 msgid "didn't get any affected image, container or snapshot from server"
 msgstr ""
 
@@ -2213,7 +2285,7 @@ msgstr ""
 msgid "disabled"
 msgstr ""
 
-#: lxc/storage.go:803
+#: lxc/storage.go:834
 msgid "driver"
 msgstr ""
 
@@ -2231,11 +2303,11 @@ msgstr ""
 msgid "error: unknown command: %s"
 msgstr ""
 
-#: lxc/storage.go:801
+#: lxc/storage.go:832
 msgid "info"
 msgstr ""
 
-#: lxc/storage.go:802
+#: lxc/storage.go:833
 msgid "name"
 msgstr ""
 
@@ -2247,7 +2319,7 @@ msgstr ""
 msgid "ok (y/n)?"
 msgstr ""
 
-#: lxc/main.go:366 lxc/main.go:370
+#: lxc/main.go:367 lxc/main.go:371
 #, c-format
 msgid "processing aliases failed %s\n"
 msgstr ""
@@ -2276,32 +2348,32 @@ msgstr ""
 msgid "remote %s is static and cannot be modified"
 msgstr ""
 
-#: lxc/storage.go:806
+#: lxc/storage.go:837
 msgid "space used"
 msgstr ""
 
-#: lxc/info.go:248
+#: lxc/info.go:251
 msgid "stateful"
 msgstr ""
 
-#: lxc/info.go:250
+#: lxc/info.go:253
 msgid "stateless"
 msgstr ""
 
-#: lxc/info.go:244
+#: lxc/info.go:247
 #, c-format
 msgid "taken at %s"
 msgstr ""
 
-#: lxc/storage.go:805
+#: lxc/storage.go:836
 msgid "total space"
 msgstr ""
 
-#: lxc/storage.go:800
+#: lxc/storage.go:831
 msgid "used by"
 msgstr ""
 
-#: lxc/main.go:297
+#: lxc/main.go:298
 msgid "wrong number of subcommand arguments"
 msgstr ""
 
diff --git a/po/pl.po b/po/pl.po
index 3c6ddfe4d..f7681c149 100644
--- a/po/pl.po
+++ b/po/pl.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2018-01-21 21:48+0000\n"
+"POT-Creation-Date: 2018-01-24 13:34+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -16,7 +16,7 @@ msgstr ""
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
 
-#: lxc/storage.go:34
+#: lxc/storage.go:35
 msgid ""
 "### This is a yaml representation of a storage pool.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -33,7 +33,7 @@ msgid ""
 "###   zfs.pool_name: default"
 msgstr ""
 
-#: lxc/storage.go:51
+#: lxc/storage.go:52
 msgid ""
 "### This is a yaml representation of a storage volume.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -102,7 +102,7 @@ msgid ""
 "###  description: My custom image"
 msgstr ""
 
-#: lxc/network.go:30
+#: lxc/network.go:32
 msgid ""
 "### This is a yaml representation of the network.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -153,7 +153,7 @@ msgstr ""
 msgid "%s is not a directory"
 msgstr ""
 
-#: lxc/utils.go:375
+#: lxc/utils.go:376
 #, c-format
 msgid "%v (interrupt two more times to force)"
 msgstr ""
@@ -179,7 +179,7 @@ msgstr ""
 msgid "ARCH"
 msgstr ""
 
-#: lxc/list.go:463
+#: lxc/list.go:467
 msgid "ARCHITECTURE"
 msgstr ""
 
@@ -204,7 +204,7 @@ msgstr ""
 msgid "Aliases:"
 msgstr ""
 
-#: lxc/image.go:600 lxc/info.go:122
+#: lxc/image.go:600 lxc/info.go:125
 #, c-format
 msgid "Architecture: %s"
 msgstr ""
@@ -228,11 +228,11 @@ msgstr ""
 msgid "Both --all and container name given"
 msgstr ""
 
-#: lxc/info.go:215
+#: lxc/info.go:218
 msgid "Bytes received"
 msgstr ""
 
-#: lxc/info.go:216
+#: lxc/info.go:219
 msgid "Bytes sent"
 msgstr ""
 
@@ -244,11 +244,11 @@ msgstr ""
 msgid "COMMON NAME"
 msgstr ""
 
-#: lxc/info.go:179
+#: lxc/info.go:182
 msgid "CPU usage (in seconds)"
 msgstr ""
 
-#: lxc/info.go:183
+#: lxc/info.go:186
 msgid "CPU usage:"
 msgstr ""
 
@@ -256,7 +256,7 @@ msgstr ""
 msgid "CREATED"
 msgstr ""
 
-#: lxc/list.go:464
+#: lxc/list.go:468
 msgid "CREATED AT"
 msgstr ""
 
@@ -269,7 +269,7 @@ msgstr ""
 msgid "Can't pull a directory without --recursive"
 msgstr ""
 
-#: lxc/config.go:207 lxc/network.go:560
+#: lxc/config.go:207 lxc/network.go:587
 #, c-format
 msgid "Can't read from stdin: %s"
 msgstr ""
@@ -288,7 +288,7 @@ msgstr ""
 msgid "Can't unset key '%s', it's not currently set."
 msgstr ""
 
-#: lxc/profile.go:546 lxc/storage.go:663
+#: lxc/profile.go:546 lxc/storage.go:675
 msgid "Cannot provide container name to list"
 msgstr ""
 
@@ -301,7 +301,7 @@ msgstr ""
 msgid "Client certificate stored at server: "
 msgstr ""
 
-#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:130 lxc/list.go:131
+#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:134 lxc/list.go:135
 msgid "Columns"
 msgstr ""
 
@@ -309,12 +309,12 @@ msgstr ""
 msgid "Commands:"
 msgstr ""
 
-#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:137 lxc/init.go:138
+#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:138 lxc/init.go:139
 msgid "Config key/value to apply to the new container"
 msgstr ""
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:614 lxc/storage.go:1067
+#: lxc/network.go:440 lxc/profile.go:275 lxc/storage.go:626 lxc/storage.go:1138
 #, c-format
 msgid "Config parsing error: %s"
 msgstr ""
@@ -331,7 +331,7 @@ msgstr ""
 msgid "Container name is mandatory"
 msgstr ""
 
-#: lxc/copy.go:229 lxc/init.go:317
+#: lxc/copy.go:229 lxc/init.go:320
 #, c-format
 msgid "Container name is: %s"
 msgstr ""
@@ -366,26 +366,30 @@ msgstr ""
 msgid "Create any directories necessary"
 msgstr ""
 
-#: lxc/image.go:605 lxc/info.go:124
+#: lxc/image.go:605 lxc/info.go:127
 #, c-format
 msgid "Created: %s"
 msgstr ""
 
-#: lxc/init.go:196
+#: lxc/init.go:199
 #, c-format
 msgid "Creating %s"
 msgstr ""
 
-#: lxc/init.go:194
+#: lxc/init.go:197
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:465 lxc/network.go:525
-#: lxc/storage.go:692 lxc/storage.go:881
+#: lxc/cluster.go:184
+msgid "DATABASE"
+msgstr ""
+
+#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:469 lxc/network.go:547
+#: lxc/storage.go:711 lxc/storage.go:916
 msgid "DESCRIPTION"
 msgstr ""
 
-#: lxc/storage.go:693
+#: lxc/storage.go:712
 msgid "DRIVER"
 msgstr ""
 
@@ -403,7 +407,7 @@ msgstr ""
 msgid "Device %s removed from %s"
 msgstr ""
 
-#: lxc/utils.go:307 lxc/utils.go:331
+#: lxc/utils.go:308 lxc/utils.go:332
 #, c-format
 msgid "Device already exists: %s"
 msgstr ""
@@ -420,11 +424,11 @@ msgstr ""
 msgid "Disable stdin (reads from /dev/null)"
 msgstr ""
 
-#: lxc/info.go:172
+#: lxc/info.go:175
 msgid "Disk usage:"
 msgstr ""
 
-#: lxc/list.go:622
+#: lxc/list.go:638
 msgid "EPHEMERAL"
 msgstr ""
 
@@ -448,7 +452,7 @@ msgstr ""
 msgid "Environment:"
 msgstr ""
 
-#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:141 lxc/init.go:142
+#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:142 lxc/init.go:143
 msgid "Ephemeral container"
 msgstr ""
 
@@ -483,7 +487,7 @@ msgstr ""
 msgid "FINGERPRINT"
 msgstr ""
 
-#: lxc/utils.go:413
+#: lxc/utils.go:414
 #, c-format
 msgid "Failed to create alias %s"
 msgstr ""
@@ -502,7 +506,7 @@ msgstr ""
 msgid "Failed to get the new container name"
 msgstr ""
 
-#: lxc/utils.go:403
+#: lxc/utils.go:404
 #, c-format
 msgid "Failed to remove alias %s"
 msgstr ""
@@ -512,11 +516,11 @@ msgstr ""
 msgid "Failed to walk path for %s: %s"
 msgstr ""
 
-#: lxc/list.go:133
+#: lxc/list.go:137
 msgid "Fast mode (same as --columns=nsacPt)"
 msgstr ""
 
-#: lxc/network.go:486 lxc/operation.go:121
+#: lxc/network.go:504 lxc/operation.go:121
 msgid "Filtering isn't supported yet"
 msgstr ""
 
@@ -529,6 +533,10 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
+#: lxc/cluster.go:41
+msgid "Force removing a node, even if degraded"
+msgstr ""
+
 #: lxc/action.go:47 lxc/action.go:48
 msgid "Force the container to shutdown"
 msgstr ""
@@ -541,7 +549,7 @@ msgstr ""
 msgid "Force using the local unix socket"
 msgstr ""
 
-#: lxc/image.go:180 lxc/list.go:132
+#: lxc/image.go:180 lxc/list.go:136
 msgid "Format (csv|json|table|yaml)"
 msgstr ""
 
@@ -553,11 +561,11 @@ msgstr ""
 msgid "ID"
 msgstr ""
 
-#: lxc/list.go:461
+#: lxc/list.go:465
 msgid "IPV4"
 msgstr ""
 
-#: lxc/list.go:462
+#: lxc/list.go:466
 msgid "IPV6"
 msgstr ""
 
@@ -604,7 +612,7 @@ msgstr ""
 msgid "Input data"
 msgstr ""
 
-#: lxc/init.go:147
+#: lxc/init.go:148
 msgid "Instance type"
 msgstr ""
 
@@ -641,7 +649,7 @@ msgstr ""
 msgid "Invalid target %s"
 msgstr ""
 
-#: lxc/info.go:153
+#: lxc/info.go:156
 msgid "Ips:"
 msgstr ""
 
@@ -649,7 +657,7 @@ msgstr ""
 msgid "Keep the image up to date after initial copy"
 msgstr ""
 
-#: lxc/list.go:466
+#: lxc/list.go:470
 msgid "LAST USED AT"
 msgstr ""
 
@@ -666,14 +674,18 @@ msgstr ""
 msgid "Last used: never"
 msgstr ""
 
-#: lxc/info.go:268
+#: lxc/info.go:271
 msgid "Log:"
 msgstr ""
 
-#: lxc/network.go:524
+#: lxc/network.go:546
 msgid "MANAGED"
 msgstr ""
 
+#: lxc/cluster.go:186
+msgid "MESSAGE"
+msgstr ""
+
 #: lxc/image.go:176
 msgid "Make image public"
 msgstr ""
@@ -682,23 +694,23 @@ msgstr ""
 msgid "Make the image public"
 msgstr ""
 
-#: lxc/info.go:190
+#: lxc/info.go:193
 msgid "Memory (current)"
 msgstr ""
 
-#: lxc/info.go:194
+#: lxc/info.go:197
 msgid "Memory (peak)"
 msgstr ""
 
-#: lxc/info.go:206
+#: lxc/info.go:209
 msgid "Memory usage:"
 msgstr ""
 
-#: lxc/utils.go:258
+#: lxc/utils.go:259
 msgid "Missing summary."
 msgstr ""
 
-#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:412 lxc/storage.go:532
+#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:414 lxc/storage.go:544
 msgid "More than one device matches, specify the device name."
 msgstr ""
 
@@ -718,40 +730,49 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/list.go:467 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409
-#: lxc/storage.go:691 lxc/storage.go:880
+#: lxc/cluster.go:182 lxc/list.go:471 lxc/network.go:544 lxc/profile.go:573
+#: lxc/remote.go:409 lxc/storage.go:710 lxc/storage.go:915
 msgid "NAME"
 msgstr ""
 
-#: lxc/network.go:508 lxc/operation.go:139 lxc/remote.go:380 lxc/remote.go:385
+#: lxc/network.go:526 lxc/operation.go:139 lxc/remote.go:380 lxc/remote.go:385
 msgid "NO"
 msgstr ""
 
+#: lxc/list.go:492 lxc/storage.go:920
+msgid "NODE"
+msgstr ""
+
 #: lxc/info.go:117
 #, c-format
 msgid "Name: %s"
 msgstr ""
 
-#: lxc/network.go:258
+#: lxc/network.go:271
 #, c-format
 msgid "Network %s created"
 msgstr ""
 
-#: lxc/network.go:374
+#: lxc/network.go:388
 #, c-format
 msgid "Network %s deleted"
 msgstr ""
 
-#: lxc/network.go:451
+#: lxc/network.go:269
+#, c-format
+msgid "Network %s pending on node %s"
+msgstr ""
+
+#: lxc/network.go:465
 #, c-format
 msgid "Network %s renamed to %s"
 msgstr ""
 
-#: lxc/init.go:143 lxc/init.go:144
+#: lxc/init.go:144 lxc/init.go:145
 msgid "Network name"
 msgstr ""
 
-#: lxc/info.go:223
+#: lxc/info.go:226
 msgid "Network usage:"
 msgstr ""
 
@@ -763,11 +784,11 @@ msgstr ""
 msgid "No certificate provided to add"
 msgstr ""
 
-#: lxc/network.go:293 lxc/network.go:346
+#: lxc/network.go:307 lxc/network.go:360
 msgid "No device found for this network"
 msgstr ""
 
-#: lxc/storage.go:421 lxc/storage.go:541
+#: lxc/storage.go:423 lxc/storage.go:553
 msgid "No device found for this storage volume."
 msgstr ""
 
@@ -775,7 +796,26 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/storage.go:365 lxc/storage.go:458
+#: lxc/cluster.go:142
+#, c-format
+msgid "Node %s removed"
+msgstr ""
+
+#: lxc/cluster.go:118
+#, c-format
+msgid "Node %s renamed to %s"
+msgstr ""
+
+#: lxc/init.go:149 lxc/network.go:102 lxc/storage.go:158
+msgid "Node name"
+msgstr ""
+
+#: lxc/info.go:119
+#, c-format
+msgid "Node: %s"
+msgstr ""
+
+#: lxc/storage.go:367 lxc/storage.go:460
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
 
@@ -787,7 +827,7 @@ msgstr ""
 msgid "Only https:// is supported for remote image import."
 msgstr ""
 
-#: lxc/network.go:402 lxc/network.go:546
+#: lxc/network.go:416 lxc/network.go:573
 msgid "Only managed networks can be modified."
 msgstr ""
 
@@ -804,19 +844,19 @@ msgstr ""
 msgid "Override the terminal mode (auto, interactive or non-interactive)"
 msgstr ""
 
-#: lxc/list.go:624
+#: lxc/list.go:640
 msgid "PERSISTENT"
 msgstr ""
 
-#: lxc/list.go:469
+#: lxc/list.go:473
 msgid "PID"
 msgstr ""
 
-#: lxc/list.go:468
+#: lxc/list.go:472
 msgid "PROCESSES"
 msgstr ""
 
-#: lxc/list.go:470
+#: lxc/list.go:474
 msgid "PROFILES"
 msgstr ""
 
@@ -828,11 +868,11 @@ msgstr ""
 msgid "PUBLIC"
 msgstr ""
 
-#: lxc/info.go:217
+#: lxc/info.go:220
 msgid "Packets received"
 msgstr ""
 
-#: lxc/info.go:218
+#: lxc/info.go:221
 msgid "Packets sent"
 msgstr ""
 
@@ -844,7 +884,7 @@ msgstr ""
 msgid "Path to an alternate server directory"
 msgstr ""
 
-#: lxc/main.go:238
+#: lxc/main.go:239
 msgid "Pause containers."
 msgstr ""
 
@@ -852,12 +892,12 @@ msgstr ""
 msgid "Permission denied, are you in the lxd group?"
 msgstr ""
 
-#: lxc/info.go:135
+#: lxc/info.go:138
 #, c-format
 msgid "Pid: %d"
 msgstr ""
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:615 lxc/storage.go:1068
+#: lxc/network.go:441 lxc/profile.go:276 lxc/storage.go:627 lxc/storage.go:1139
 msgid "Press enter to open the editor again"
 msgstr ""
 
@@ -882,7 +922,7 @@ msgstr ""
 msgid "Print verbose information"
 msgstr ""
 
-#: lxc/info.go:159
+#: lxc/info.go:162
 #, c-format
 msgid "Processes: %d"
 msgstr ""
@@ -912,7 +952,7 @@ msgstr ""
 msgid "Profile %s renamed to %s"
 msgstr ""
 
-#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:139 lxc/init.go:140
+#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:140 lxc/init.go:141
 msgid "Profile to apply to the new container"
 msgstr ""
 
@@ -921,7 +961,7 @@ msgstr ""
 msgid "Profiles %s applied to %s"
 msgstr ""
 
-#: lxc/info.go:133
+#: lxc/info.go:136
 #, c-format
 msgid "Profiles: %s"
 msgstr ""
@@ -952,11 +992,11 @@ msgstr ""
 msgid "Remote admin password"
 msgstr ""
 
-#: lxc/utils.go:366
+#: lxc/utils.go:367
 msgid "Remote operation canceled by user"
 msgstr ""
 
-#: lxc/info.go:119
+#: lxc/info.go:122
 #, c-format
 msgid "Remote: %s"
 msgstr ""
@@ -966,7 +1006,7 @@ msgstr ""
 msgid "Remove %s (yes/no): "
 msgstr ""
 
-#: lxc/storage.go:1099
+#: lxc/storage.go:1176
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr ""
@@ -975,11 +1015,11 @@ msgstr ""
 msgid "Require user confirmation"
 msgstr ""
 
-#: lxc/info.go:156
+#: lxc/info.go:159
 msgid "Resources:"
 msgstr ""
 
-#: lxc/main.go:246
+#: lxc/main.go:247
 msgid "Restart containers."
 msgstr ""
 
@@ -987,7 +1027,7 @@ msgstr ""
 msgid "Retrieve the container's console log"
 msgstr ""
 
-#: lxc/init.go:289
+#: lxc/init.go:292
 #, c-format
 msgid "Retrieving image: %s"
 msgstr ""
@@ -1000,15 +1040,15 @@ msgstr ""
 msgid "SIZE"
 msgstr ""
 
-#: lxc/list.go:471
+#: lxc/list.go:475
 msgid "SNAPSHOTS"
 msgstr ""
 
-#: lxc/storage.go:694
+#: lxc/storage.go:717
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/list.go:472
+#: lxc/cluster.go:185 lxc/list.go:476 lxc/network.go:551 lxc/storage.go:715
 msgid "STATE"
 msgstr ""
 
@@ -1020,7 +1060,7 @@ msgstr ""
 msgid "STATUS"
 msgstr ""
 
-#: lxc/list.go:474
+#: lxc/list.go:478
 msgid "STORAGE POOL"
 msgstr ""
 
@@ -1072,11 +1112,11 @@ msgstr ""
 msgid "Show the resources available to the server"
 msgstr ""
 
-#: lxc/storage.go:155
+#: lxc/storage.go:156
 msgid "Show the resources available to the storage pool"
 msgstr ""
 
-#: lxc/storage.go:156
+#: lxc/storage.go:157
 msgid "Show the used and free space in bytes"
 msgstr ""
 
@@ -1085,7 +1125,7 @@ msgstr ""
 msgid "Size: %.2fMB"
 msgstr ""
 
-#: lxc/info.go:237
+#: lxc/info.go:240
 msgid "Snapshots:"
 msgstr ""
 
@@ -1098,7 +1138,7 @@ msgstr ""
 msgid "Source:"
 msgstr ""
 
-#: lxc/main.go:256
+#: lxc/main.go:257
 msgid "Start containers."
 msgstr ""
 
@@ -1107,12 +1147,12 @@ msgstr ""
 msgid "Starting %s"
 msgstr ""
 
-#: lxc/info.go:127
+#: lxc/info.go:130
 #, c-format
 msgid "Status: %s"
 msgstr ""
 
-#: lxc/main.go:262
+#: lxc/main.go:263
 msgid "Stop containers."
 msgstr ""
 
@@ -1129,26 +1169,31 @@ msgstr ""
 msgid "Stopping the container failed: %s"
 msgstr ""
 
-#: lxc/storage.go:506
+#: lxc/storage.go:517
 #, c-format
 msgid "Storage pool %s created"
 msgstr ""
 
-#: lxc/storage.go:565
+#: lxc/storage.go:577
 #, c-format
 msgid "Storage pool %s deleted"
 msgstr ""
 
-#: lxc/init.go:145 lxc/init.go:146
+#: lxc/storage.go:515
+#, c-format
+msgid "Storage pool %s pending on node %s"
+msgstr ""
+
+#: lxc/init.go:146 lxc/init.go:147
 msgid "Storage pool name"
 msgstr ""
 
-#: lxc/storage.go:914
+#: lxc/storage.go:959
 #, c-format
 msgid "Storage volume %s created"
 msgstr ""
 
-#: lxc/storage.go:929
+#: lxc/storage.go:979
 #, c-format
 msgid "Storage volume %s deleted"
 msgstr ""
@@ -1157,15 +1202,15 @@ msgstr ""
 msgid "Store the container state (only for stop)"
 msgstr ""
 
-#: lxc/info.go:198
+#: lxc/info.go:201
 msgid "Swap (current)"
 msgstr ""
 
-#: lxc/info.go:202
+#: lxc/info.go:205
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:473 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:879
+#: lxc/list.go:477 lxc/network.go:545 lxc/operation.go:153 lxc/storage.go:914
 msgid "TYPE"
 msgstr ""
 
@@ -1179,7 +1224,7 @@ msgid ""
 "restarted."
 msgstr ""
 
-#: lxc/init.go:362
+#: lxc/init.go:365
 msgid "The container you are starting doesn't have any network attached to it."
 msgstr ""
 
@@ -1192,7 +1237,7 @@ msgstr ""
 msgid "The device doesn't exist"
 msgstr ""
 
-#: lxc/init.go:346
+#: lxc/init.go:349
 #, c-format
 msgid "The local image '%s' couldn't be found, trying '%s:' instead."
 msgstr ""
@@ -1201,11 +1246,11 @@ msgstr ""
 msgid "The opposite of \"lxc pause\" is \"lxc start\"."
 msgstr ""
 
-#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:426 lxc/storage.go:546
+#: lxc/network.go:312 lxc/network.go:365 lxc/storage.go:428 lxc/storage.go:558
 msgid "The specified device doesn't exist"
 msgstr ""
 
-#: lxc/network.go:302 lxc/network.go:355
+#: lxc/network.go:316 lxc/network.go:369
 msgid "The specified device doesn't match the network"
 msgstr ""
 
@@ -1229,11 +1274,11 @@ msgstr ""
 msgid "Timestamps:"
 msgstr ""
 
-#: lxc/init.go:364
+#: lxc/init.go:367
 msgid "To attach a network to a container, use: lxc network attach"
 msgstr ""
 
-#: lxc/init.go:363
+#: lxc/init.go:366
 msgid "To create a new network, use: lxc network create"
 msgstr ""
 
@@ -1264,11 +1309,11 @@ msgstr ""
 msgid "Try `lxc info --show-log %s` for more info"
 msgstr ""
 
-#: lxc/info.go:129
+#: lxc/info.go:132
 msgid "Type: ephemeral"
 msgstr ""
 
-#: lxc/info.go:131
+#: lxc/info.go:134
 msgid "Type: persistent"
 msgstr ""
 
@@ -1276,11 +1321,11 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/remote.go:410
+#: lxc/cluster.go:183 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:695 lxc/storage.go:882
+#: lxc/network.go:548 lxc/profile.go:574 lxc/storage.go:719 lxc/storage.go:917
 msgid "USED BY"
 msgstr ""
 
@@ -1310,6 +1355,25 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
+#: lxc/cluster.go:22
+msgid ""
+"Usage: lxc cluster <subcommand> [options]\n"
+"\n"
+"Manage cluster nodes.\n"
+"\n"
+"lxc cluster list [<remote>:]\n"
+"    List all nodes in the cluster.\n"
+"\n"
+"lxc cluster show [<remote>:]<node>\n"
+"    Show details of a node.\n"
+"\n"
+"lxc cluster rename [<remote>:]<node> <new-name>\n"
+"    Rename a cluster node.\n"
+"\n"
+"lxc cluster delete [<remote>:]<node> [--force]\n"
+"    Delete a node from the cluster."
+msgstr ""
+
 #: lxc/config.go:85
 msgid ""
 "Usage: lxc config <subcommand> [options]\n"
@@ -1626,11 +1690,12 @@ msgid ""
 "    For LXD server information."
 msgstr ""
 
-#: lxc/init.go:77
+#: lxc/init.go:78
 msgid ""
 "Usage: lxc init [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create containers from images.\n"
 "\n"
@@ -1645,7 +1710,8 @@ msgstr ""
 msgid ""
 "Usage: lxc launch [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create and start containers from images.\n"
 "\n"
@@ -1726,6 +1792,8 @@ msgid ""
 "\n"
 "\tt - Type (persistent or ephemeral)\n"
 "\n"
+"\tH - Node hosting the container\n"
+"\n"
 "Custom columns are defined with \"key[:name][:maxWidth]\":\n"
 "\n"
 "\tKEY: The (extended) config key to display\n"
@@ -1791,7 +1859,7 @@ msgid ""
 "    Rename a snapshot."
 msgstr ""
 
-#: lxc/network.go:50
+#: lxc/network.go:52
 msgid ""
 "Usage: lxc network <subcommand> [options]\n"
 "\n"
@@ -1800,13 +1868,13 @@ msgid ""
 "lxc network list [<remote>:]\n"
 "    List available networks.\n"
 "\n"
-"lxc network show [<remote>:]<network>\n"
+"lxc network show [<remote>:]<network> [--target <node>]\n"
 "    Show details of a network.\n"
 "\n"
-"lxc network create [<remote>:]<network> [key=value...]\n"
+"lxc network create [<remote>:]<network> [key=value...] [--target <node>]\n"
 "    Create a network.\n"
 "\n"
-"lxc network get [<remote>:]<network> <key>\n"
+"lxc network get [<remote>:]<network> <key> [--target <node>]\n"
 "    Get network configuration.\n"
 "\n"
 "lxc network set [<remote>:]<network> <key> <value>\n"
@@ -2039,7 +2107,7 @@ msgid ""
 "    Create a snapshot of \"u1\" called \"snap0\"."
 msgstr ""
 
-#: lxc/storage.go:65
+#: lxc/storage.go:66
 msgid ""
 "Usage: lxc storage <subcommand> [options]\n"
 "\n"
@@ -2049,13 +2117,14 @@ msgid ""
 "lxc storage list [<remote>:]\n"
 "    List available storage pools.\n"
 "\n"
-"lxc storage show [<remote>:]<pool> [--resources]\n"
+"lxc storage show [<remote>:]<pool> [--resources] [--target <node>]\n"
 "    Show details of a storage pool.\n"
 "\n"
 "lxc storage info [<remote>:]<pool> [--bytes]\n"
 "    Show information of a storage pool in yaml format.\n"
 "\n"
-"lxc storage create [<remote>:]<pool> <driver> [key=value]...\n"
+"lxc storage create [<remote>:]<pool> <driver> [key=value]... [--target "
+"<node>]\n"
 "    Create a storage pool.\n"
 "\n"
 "lxc storage get [<remote>:]<pool> <key>\n"
@@ -2078,28 +2147,31 @@ msgid ""
 "lxc storage volume list [<remote>:]<pool>\n"
 "    List available storage volumes on a storage pool.\n"
 "\n"
-"lxc storage volume show [<remote>:]<pool> <volume>\n"
-"    Show details of a storage volume on a storage pool.\n"
+"lxc storage volume show [<remote>:]<pool> <volume> [--target <node>]\n"
+"   Show details of a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume create [<remote>:]<pool> <volume> [key=value]...\n"
+"lxc storage volume create [<remote>:]<pool> <volume> [key=value]... [--"
+"target <node>]\n"
 "    Create a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume rename [<remote>:]<pool> <old name> <new name>\n"
+"lxc storage volume rename [<remote>:]<pool> <old name> <new name> [--target "
+"<node>]\n"
 "    Rename a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume get [<remote>:]<pool> <volume> <key>\n"
+"lxc storage volume get [<remote>:]<pool> <volume> <key> [--target <node>]\n"
 "    Get storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume set [<remote>:]<pool> <volume> <key> <value>\n"
+"lxc storage volume set [<remote>:]<pool> <volume> <key> <value> [--target "
+"<node>]\n"
 "    Set storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume unset [<remote>:]<pool> <volume> <key>\n"
+"lxc storage volume unset [<remote>:]<pool> <volume> <key> [--target <node>]\n"
 "    Unset storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume delete [<remote>:]<pool> <volume>\n"
+"lxc storage volume delete [<remote>:]<pool> <volume> [--target <node>]\n"
 "    Delete a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume edit [<remote>:]<pool> <volume>\n"
+"lxc storage volume edit [<remote>:]<pool> <volume> [--target <node>]\n"
 "    Edit storage volume, either by launching external editor or reading "
 "STDIN.\n"
 "\n"
@@ -2149,7 +2221,7 @@ msgstr ""
 msgid "User aborted delete operation."
 msgstr ""
 
-#: lxc/utils.go:371
+#: lxc/utils.go:372
 msgid ""
 "User signaled us three times, exiting. The remote operation will keep "
 "running."
@@ -2169,7 +2241,7 @@ msgstr ""
 msgid "Whether or not to snapshot the container's running state"
 msgstr ""
 
-#: lxc/network.go:510 lxc/operation.go:141 lxc/remote.go:382 lxc/remote.go:387
+#: lxc/network.go:528 lxc/operation.go:141 lxc/remote.go:382 lxc/remote.go:387
 msgid "YES"
 msgstr ""
 
@@ -2201,11 +2273,11 @@ msgstr ""
 msgid "default"
 msgstr ""
 
-#: lxc/storage.go:804
+#: lxc/storage.go:835
 msgid "description"
 msgstr ""
 
-#: lxc/init.go:311
+#: lxc/init.go:314
 msgid "didn't get any affected image, container or snapshot from server"
 msgstr ""
 
@@ -2213,7 +2285,7 @@ msgstr ""
 msgid "disabled"
 msgstr ""
 
-#: lxc/storage.go:803
+#: lxc/storage.go:834
 msgid "driver"
 msgstr ""
 
@@ -2231,11 +2303,11 @@ msgstr ""
 msgid "error: unknown command: %s"
 msgstr ""
 
-#: lxc/storage.go:801
+#: lxc/storage.go:832
 msgid "info"
 msgstr ""
 
-#: lxc/storage.go:802
+#: lxc/storage.go:833
 msgid "name"
 msgstr ""
 
@@ -2247,7 +2319,7 @@ msgstr ""
 msgid "ok (y/n)?"
 msgstr ""
 
-#: lxc/main.go:366 lxc/main.go:370
+#: lxc/main.go:367 lxc/main.go:371
 #, c-format
 msgid "processing aliases failed %s\n"
 msgstr ""
@@ -2276,32 +2348,32 @@ msgstr ""
 msgid "remote %s is static and cannot be modified"
 msgstr ""
 
-#: lxc/storage.go:806
+#: lxc/storage.go:837
 msgid "space used"
 msgstr ""
 
-#: lxc/info.go:248
+#: lxc/info.go:251
 msgid "stateful"
 msgstr ""
 
-#: lxc/info.go:250
+#: lxc/info.go:253
 msgid "stateless"
 msgstr ""
 
-#: lxc/info.go:244
+#: lxc/info.go:247
 #, c-format
 msgid "taken at %s"
 msgstr ""
 
-#: lxc/storage.go:805
+#: lxc/storage.go:836
 msgid "total space"
 msgstr ""
 
-#: lxc/storage.go:800
+#: lxc/storage.go:831
 msgid "used by"
 msgstr ""
 
-#: lxc/main.go:297
+#: lxc/main.go:298
 msgid "wrong number of subcommand arguments"
 msgstr ""
 
diff --git a/po/pt_BR.po b/po/pt_BR.po
index 4ec5757f4..fffd947bd 100644
--- a/po/pt_BR.po
+++ b/po/pt_BR.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2018-01-21 21:48+0000\n"
+"POT-Creation-Date: 2018-01-24 13:34+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -16,7 +16,7 @@ msgstr ""
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
 
-#: lxc/storage.go:34
+#: lxc/storage.go:35
 msgid ""
 "### This is a yaml representation of a storage pool.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -33,7 +33,7 @@ msgid ""
 "###   zfs.pool_name: default"
 msgstr ""
 
-#: lxc/storage.go:51
+#: lxc/storage.go:52
 msgid ""
 "### This is a yaml representation of a storage volume.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -102,7 +102,7 @@ msgid ""
 "###  description: My custom image"
 msgstr ""
 
-#: lxc/network.go:30
+#: lxc/network.go:32
 msgid ""
 "### This is a yaml representation of the network.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -153,7 +153,7 @@ msgstr ""
 msgid "%s is not a directory"
 msgstr ""
 
-#: lxc/utils.go:375
+#: lxc/utils.go:376
 #, c-format
 msgid "%v (interrupt two more times to force)"
 msgstr ""
@@ -179,7 +179,7 @@ msgstr ""
 msgid "ARCH"
 msgstr ""
 
-#: lxc/list.go:463
+#: lxc/list.go:467
 msgid "ARCHITECTURE"
 msgstr ""
 
@@ -204,7 +204,7 @@ msgstr ""
 msgid "Aliases:"
 msgstr ""
 
-#: lxc/image.go:600 lxc/info.go:122
+#: lxc/image.go:600 lxc/info.go:125
 #, c-format
 msgid "Architecture: %s"
 msgstr ""
@@ -228,11 +228,11 @@ msgstr ""
 msgid "Both --all and container name given"
 msgstr ""
 
-#: lxc/info.go:215
+#: lxc/info.go:218
 msgid "Bytes received"
 msgstr ""
 
-#: lxc/info.go:216
+#: lxc/info.go:219
 msgid "Bytes sent"
 msgstr ""
 
@@ -244,11 +244,11 @@ msgstr ""
 msgid "COMMON NAME"
 msgstr ""
 
-#: lxc/info.go:179
+#: lxc/info.go:182
 msgid "CPU usage (in seconds)"
 msgstr ""
 
-#: lxc/info.go:183
+#: lxc/info.go:186
 msgid "CPU usage:"
 msgstr ""
 
@@ -256,7 +256,7 @@ msgstr ""
 msgid "CREATED"
 msgstr ""
 
-#: lxc/list.go:464
+#: lxc/list.go:468
 msgid "CREATED AT"
 msgstr ""
 
@@ -269,7 +269,7 @@ msgstr ""
 msgid "Can't pull a directory without --recursive"
 msgstr ""
 
-#: lxc/config.go:207 lxc/network.go:560
+#: lxc/config.go:207 lxc/network.go:587
 #, c-format
 msgid "Can't read from stdin: %s"
 msgstr ""
@@ -288,7 +288,7 @@ msgstr ""
 msgid "Can't unset key '%s', it's not currently set."
 msgstr ""
 
-#: lxc/profile.go:546 lxc/storage.go:663
+#: lxc/profile.go:546 lxc/storage.go:675
 msgid "Cannot provide container name to list"
 msgstr ""
 
@@ -301,7 +301,7 @@ msgstr ""
 msgid "Client certificate stored at server: "
 msgstr ""
 
-#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:130 lxc/list.go:131
+#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:134 lxc/list.go:135
 msgid "Columns"
 msgstr ""
 
@@ -309,12 +309,12 @@ msgstr ""
 msgid "Commands:"
 msgstr ""
 
-#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:137 lxc/init.go:138
+#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:138 lxc/init.go:139
 msgid "Config key/value to apply to the new container"
 msgstr ""
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:614 lxc/storage.go:1067
+#: lxc/network.go:440 lxc/profile.go:275 lxc/storage.go:626 lxc/storage.go:1138
 #, c-format
 msgid "Config parsing error: %s"
 msgstr ""
@@ -331,7 +331,7 @@ msgstr ""
 msgid "Container name is mandatory"
 msgstr ""
 
-#: lxc/copy.go:229 lxc/init.go:317
+#: lxc/copy.go:229 lxc/init.go:320
 #, c-format
 msgid "Container name is: %s"
 msgstr ""
@@ -366,26 +366,30 @@ msgstr ""
 msgid "Create any directories necessary"
 msgstr ""
 
-#: lxc/image.go:605 lxc/info.go:124
+#: lxc/image.go:605 lxc/info.go:127
 #, c-format
 msgid "Created: %s"
 msgstr ""
 
-#: lxc/init.go:196
+#: lxc/init.go:199
 #, c-format
 msgid "Creating %s"
 msgstr ""
 
-#: lxc/init.go:194
+#: lxc/init.go:197
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:465 lxc/network.go:525
-#: lxc/storage.go:692 lxc/storage.go:881
+#: lxc/cluster.go:184
+msgid "DATABASE"
+msgstr ""
+
+#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:469 lxc/network.go:547
+#: lxc/storage.go:711 lxc/storage.go:916
 msgid "DESCRIPTION"
 msgstr ""
 
-#: lxc/storage.go:693
+#: lxc/storage.go:712
 msgid "DRIVER"
 msgstr ""
 
@@ -403,7 +407,7 @@ msgstr ""
 msgid "Device %s removed from %s"
 msgstr ""
 
-#: lxc/utils.go:307 lxc/utils.go:331
+#: lxc/utils.go:308 lxc/utils.go:332
 #, c-format
 msgid "Device already exists: %s"
 msgstr ""
@@ -420,11 +424,11 @@ msgstr ""
 msgid "Disable stdin (reads from /dev/null)"
 msgstr ""
 
-#: lxc/info.go:172
+#: lxc/info.go:175
 msgid "Disk usage:"
 msgstr ""
 
-#: lxc/list.go:622
+#: lxc/list.go:638
 msgid "EPHEMERAL"
 msgstr ""
 
@@ -448,7 +452,7 @@ msgstr ""
 msgid "Environment:"
 msgstr ""
 
-#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:141 lxc/init.go:142
+#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:142 lxc/init.go:143
 msgid "Ephemeral container"
 msgstr ""
 
@@ -483,7 +487,7 @@ msgstr ""
 msgid "FINGERPRINT"
 msgstr ""
 
-#: lxc/utils.go:413
+#: lxc/utils.go:414
 #, c-format
 msgid "Failed to create alias %s"
 msgstr ""
@@ -502,7 +506,7 @@ msgstr ""
 msgid "Failed to get the new container name"
 msgstr ""
 
-#: lxc/utils.go:403
+#: lxc/utils.go:404
 #, c-format
 msgid "Failed to remove alias %s"
 msgstr ""
@@ -512,11 +516,11 @@ msgstr ""
 msgid "Failed to walk path for %s: %s"
 msgstr ""
 
-#: lxc/list.go:133
+#: lxc/list.go:137
 msgid "Fast mode (same as --columns=nsacPt)"
 msgstr ""
 
-#: lxc/network.go:486 lxc/operation.go:121
+#: lxc/network.go:504 lxc/operation.go:121
 msgid "Filtering isn't supported yet"
 msgstr ""
 
@@ -529,6 +533,10 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
+#: lxc/cluster.go:41
+msgid "Force removing a node, even if degraded"
+msgstr ""
+
 #: lxc/action.go:47 lxc/action.go:48
 msgid "Force the container to shutdown"
 msgstr ""
@@ -541,7 +549,7 @@ msgstr ""
 msgid "Force using the local unix socket"
 msgstr ""
 
-#: lxc/image.go:180 lxc/list.go:132
+#: lxc/image.go:180 lxc/list.go:136
 msgid "Format (csv|json|table|yaml)"
 msgstr ""
 
@@ -553,11 +561,11 @@ msgstr ""
 msgid "ID"
 msgstr ""
 
-#: lxc/list.go:461
+#: lxc/list.go:465
 msgid "IPV4"
 msgstr ""
 
-#: lxc/list.go:462
+#: lxc/list.go:466
 msgid "IPV6"
 msgstr ""
 
@@ -604,7 +612,7 @@ msgstr ""
 msgid "Input data"
 msgstr ""
 
-#: lxc/init.go:147
+#: lxc/init.go:148
 msgid "Instance type"
 msgstr ""
 
@@ -641,7 +649,7 @@ msgstr ""
 msgid "Invalid target %s"
 msgstr ""
 
-#: lxc/info.go:153
+#: lxc/info.go:156
 msgid "Ips:"
 msgstr ""
 
@@ -649,7 +657,7 @@ msgstr ""
 msgid "Keep the image up to date after initial copy"
 msgstr ""
 
-#: lxc/list.go:466
+#: lxc/list.go:470
 msgid "LAST USED AT"
 msgstr ""
 
@@ -666,14 +674,18 @@ msgstr ""
 msgid "Last used: never"
 msgstr ""
 
-#: lxc/info.go:268
+#: lxc/info.go:271
 msgid "Log:"
 msgstr ""
 
-#: lxc/network.go:524
+#: lxc/network.go:546
 msgid "MANAGED"
 msgstr ""
 
+#: lxc/cluster.go:186
+msgid "MESSAGE"
+msgstr ""
+
 #: lxc/image.go:176
 msgid "Make image public"
 msgstr ""
@@ -682,23 +694,23 @@ msgstr ""
 msgid "Make the image public"
 msgstr ""
 
-#: lxc/info.go:190
+#: lxc/info.go:193
 msgid "Memory (current)"
 msgstr ""
 
-#: lxc/info.go:194
+#: lxc/info.go:197
 msgid "Memory (peak)"
 msgstr ""
 
-#: lxc/info.go:206
+#: lxc/info.go:209
 msgid "Memory usage:"
 msgstr ""
 
-#: lxc/utils.go:258
+#: lxc/utils.go:259
 msgid "Missing summary."
 msgstr ""
 
-#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:412 lxc/storage.go:532
+#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:414 lxc/storage.go:544
 msgid "More than one device matches, specify the device name."
 msgstr ""
 
@@ -718,40 +730,49 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/list.go:467 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409
-#: lxc/storage.go:691 lxc/storage.go:880
+#: lxc/cluster.go:182 lxc/list.go:471 lxc/network.go:544 lxc/profile.go:573
+#: lxc/remote.go:409 lxc/storage.go:710 lxc/storage.go:915
 msgid "NAME"
 msgstr ""
 
-#: lxc/network.go:508 lxc/operation.go:139 lxc/remote.go:380 lxc/remote.go:385
+#: lxc/network.go:526 lxc/operation.go:139 lxc/remote.go:380 lxc/remote.go:385
 msgid "NO"
 msgstr ""
 
+#: lxc/list.go:492 lxc/storage.go:920
+msgid "NODE"
+msgstr ""
+
 #: lxc/info.go:117
 #, c-format
 msgid "Name: %s"
 msgstr ""
 
-#: lxc/network.go:258
+#: lxc/network.go:271
 #, c-format
 msgid "Network %s created"
 msgstr ""
 
-#: lxc/network.go:374
+#: lxc/network.go:388
 #, c-format
 msgid "Network %s deleted"
 msgstr ""
 
-#: lxc/network.go:451
+#: lxc/network.go:269
+#, c-format
+msgid "Network %s pending on node %s"
+msgstr ""
+
+#: lxc/network.go:465
 #, c-format
 msgid "Network %s renamed to %s"
 msgstr ""
 
-#: lxc/init.go:143 lxc/init.go:144
+#: lxc/init.go:144 lxc/init.go:145
 msgid "Network name"
 msgstr ""
 
-#: lxc/info.go:223
+#: lxc/info.go:226
 msgid "Network usage:"
 msgstr ""
 
@@ -763,11 +784,11 @@ msgstr ""
 msgid "No certificate provided to add"
 msgstr ""
 
-#: lxc/network.go:293 lxc/network.go:346
+#: lxc/network.go:307 lxc/network.go:360
 msgid "No device found for this network"
 msgstr ""
 
-#: lxc/storage.go:421 lxc/storage.go:541
+#: lxc/storage.go:423 lxc/storage.go:553
 msgid "No device found for this storage volume."
 msgstr ""
 
@@ -775,7 +796,26 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/storage.go:365 lxc/storage.go:458
+#: lxc/cluster.go:142
+#, c-format
+msgid "Node %s removed"
+msgstr ""
+
+#: lxc/cluster.go:118
+#, c-format
+msgid "Node %s renamed to %s"
+msgstr ""
+
+#: lxc/init.go:149 lxc/network.go:102 lxc/storage.go:158
+msgid "Node name"
+msgstr ""
+
+#: lxc/info.go:119
+#, c-format
+msgid "Node: %s"
+msgstr ""
+
+#: lxc/storage.go:367 lxc/storage.go:460
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
 
@@ -787,7 +827,7 @@ msgstr ""
 msgid "Only https:// is supported for remote image import."
 msgstr ""
 
-#: lxc/network.go:402 lxc/network.go:546
+#: lxc/network.go:416 lxc/network.go:573
 msgid "Only managed networks can be modified."
 msgstr ""
 
@@ -804,19 +844,19 @@ msgstr ""
 msgid "Override the terminal mode (auto, interactive or non-interactive)"
 msgstr ""
 
-#: lxc/list.go:624
+#: lxc/list.go:640
 msgid "PERSISTENT"
 msgstr ""
 
-#: lxc/list.go:469
+#: lxc/list.go:473
 msgid "PID"
 msgstr ""
 
-#: lxc/list.go:468
+#: lxc/list.go:472
 msgid "PROCESSES"
 msgstr ""
 
-#: lxc/list.go:470
+#: lxc/list.go:474
 msgid "PROFILES"
 msgstr ""
 
@@ -828,11 +868,11 @@ msgstr ""
 msgid "PUBLIC"
 msgstr ""
 
-#: lxc/info.go:217
+#: lxc/info.go:220
 msgid "Packets received"
 msgstr ""
 
-#: lxc/info.go:218
+#: lxc/info.go:221
 msgid "Packets sent"
 msgstr ""
 
@@ -844,7 +884,7 @@ msgstr ""
 msgid "Path to an alternate server directory"
 msgstr ""
 
-#: lxc/main.go:238
+#: lxc/main.go:239
 msgid "Pause containers."
 msgstr ""
 
@@ -852,12 +892,12 @@ msgstr ""
 msgid "Permission denied, are you in the lxd group?"
 msgstr ""
 
-#: lxc/info.go:135
+#: lxc/info.go:138
 #, c-format
 msgid "Pid: %d"
 msgstr ""
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:615 lxc/storage.go:1068
+#: lxc/network.go:441 lxc/profile.go:276 lxc/storage.go:627 lxc/storage.go:1139
 msgid "Press enter to open the editor again"
 msgstr ""
 
@@ -882,7 +922,7 @@ msgstr ""
 msgid "Print verbose information"
 msgstr ""
 
-#: lxc/info.go:159
+#: lxc/info.go:162
 #, c-format
 msgid "Processes: %d"
 msgstr ""
@@ -912,7 +952,7 @@ msgstr ""
 msgid "Profile %s renamed to %s"
 msgstr ""
 
-#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:139 lxc/init.go:140
+#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:140 lxc/init.go:141
 msgid "Profile to apply to the new container"
 msgstr ""
 
@@ -921,7 +961,7 @@ msgstr ""
 msgid "Profiles %s applied to %s"
 msgstr ""
 
-#: lxc/info.go:133
+#: lxc/info.go:136
 #, c-format
 msgid "Profiles: %s"
 msgstr ""
@@ -952,11 +992,11 @@ msgstr ""
 msgid "Remote admin password"
 msgstr ""
 
-#: lxc/utils.go:366
+#: lxc/utils.go:367
 msgid "Remote operation canceled by user"
 msgstr ""
 
-#: lxc/info.go:119
+#: lxc/info.go:122
 #, c-format
 msgid "Remote: %s"
 msgstr ""
@@ -966,7 +1006,7 @@ msgstr ""
 msgid "Remove %s (yes/no): "
 msgstr ""
 
-#: lxc/storage.go:1099
+#: lxc/storage.go:1176
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr ""
@@ -975,11 +1015,11 @@ msgstr ""
 msgid "Require user confirmation"
 msgstr ""
 
-#: lxc/info.go:156
+#: lxc/info.go:159
 msgid "Resources:"
 msgstr ""
 
-#: lxc/main.go:246
+#: lxc/main.go:247
 msgid "Restart containers."
 msgstr ""
 
@@ -987,7 +1027,7 @@ msgstr ""
 msgid "Retrieve the container's console log"
 msgstr ""
 
-#: lxc/init.go:289
+#: lxc/init.go:292
 #, c-format
 msgid "Retrieving image: %s"
 msgstr ""
@@ -1000,15 +1040,15 @@ msgstr ""
 msgid "SIZE"
 msgstr ""
 
-#: lxc/list.go:471
+#: lxc/list.go:475
 msgid "SNAPSHOTS"
 msgstr ""
 
-#: lxc/storage.go:694
+#: lxc/storage.go:717
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/list.go:472
+#: lxc/cluster.go:185 lxc/list.go:476 lxc/network.go:551 lxc/storage.go:715
 msgid "STATE"
 msgstr ""
 
@@ -1020,7 +1060,7 @@ msgstr ""
 msgid "STATUS"
 msgstr ""
 
-#: lxc/list.go:474
+#: lxc/list.go:478
 msgid "STORAGE POOL"
 msgstr ""
 
@@ -1072,11 +1112,11 @@ msgstr ""
 msgid "Show the resources available to the server"
 msgstr ""
 
-#: lxc/storage.go:155
+#: lxc/storage.go:156
 msgid "Show the resources available to the storage pool"
 msgstr ""
 
-#: lxc/storage.go:156
+#: lxc/storage.go:157
 msgid "Show the used and free space in bytes"
 msgstr ""
 
@@ -1085,7 +1125,7 @@ msgstr ""
 msgid "Size: %.2fMB"
 msgstr ""
 
-#: lxc/info.go:237
+#: lxc/info.go:240
 msgid "Snapshots:"
 msgstr ""
 
@@ -1098,7 +1138,7 @@ msgstr ""
 msgid "Source:"
 msgstr ""
 
-#: lxc/main.go:256
+#: lxc/main.go:257
 msgid "Start containers."
 msgstr ""
 
@@ -1107,12 +1147,12 @@ msgstr ""
 msgid "Starting %s"
 msgstr ""
 
-#: lxc/info.go:127
+#: lxc/info.go:130
 #, c-format
 msgid "Status: %s"
 msgstr ""
 
-#: lxc/main.go:262
+#: lxc/main.go:263
 msgid "Stop containers."
 msgstr ""
 
@@ -1129,26 +1169,31 @@ msgstr ""
 msgid "Stopping the container failed: %s"
 msgstr ""
 
-#: lxc/storage.go:506
+#: lxc/storage.go:517
 #, c-format
 msgid "Storage pool %s created"
 msgstr ""
 
-#: lxc/storage.go:565
+#: lxc/storage.go:577
 #, c-format
 msgid "Storage pool %s deleted"
 msgstr ""
 
-#: lxc/init.go:145 lxc/init.go:146
+#: lxc/storage.go:515
+#, c-format
+msgid "Storage pool %s pending on node %s"
+msgstr ""
+
+#: lxc/init.go:146 lxc/init.go:147
 msgid "Storage pool name"
 msgstr ""
 
-#: lxc/storage.go:914
+#: lxc/storage.go:959
 #, c-format
 msgid "Storage volume %s created"
 msgstr ""
 
-#: lxc/storage.go:929
+#: lxc/storage.go:979
 #, c-format
 msgid "Storage volume %s deleted"
 msgstr ""
@@ -1157,15 +1202,15 @@ msgstr ""
 msgid "Store the container state (only for stop)"
 msgstr ""
 
-#: lxc/info.go:198
+#: lxc/info.go:201
 msgid "Swap (current)"
 msgstr ""
 
-#: lxc/info.go:202
+#: lxc/info.go:205
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:473 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:879
+#: lxc/list.go:477 lxc/network.go:545 lxc/operation.go:153 lxc/storage.go:914
 msgid "TYPE"
 msgstr ""
 
@@ -1179,7 +1224,7 @@ msgid ""
 "restarted."
 msgstr ""
 
-#: lxc/init.go:362
+#: lxc/init.go:365
 msgid "The container you are starting doesn't have any network attached to it."
 msgstr ""
 
@@ -1192,7 +1237,7 @@ msgstr ""
 msgid "The device doesn't exist"
 msgstr ""
 
-#: lxc/init.go:346
+#: lxc/init.go:349
 #, c-format
 msgid "The local image '%s' couldn't be found, trying '%s:' instead."
 msgstr ""
@@ -1201,11 +1246,11 @@ msgstr ""
 msgid "The opposite of \"lxc pause\" is \"lxc start\"."
 msgstr ""
 
-#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:426 lxc/storage.go:546
+#: lxc/network.go:312 lxc/network.go:365 lxc/storage.go:428 lxc/storage.go:558
 msgid "The specified device doesn't exist"
 msgstr ""
 
-#: lxc/network.go:302 lxc/network.go:355
+#: lxc/network.go:316 lxc/network.go:369
 msgid "The specified device doesn't match the network"
 msgstr ""
 
@@ -1229,11 +1274,11 @@ msgstr ""
 msgid "Timestamps:"
 msgstr ""
 
-#: lxc/init.go:364
+#: lxc/init.go:367
 msgid "To attach a network to a container, use: lxc network attach"
 msgstr ""
 
-#: lxc/init.go:363
+#: lxc/init.go:366
 msgid "To create a new network, use: lxc network create"
 msgstr ""
 
@@ -1264,11 +1309,11 @@ msgstr ""
 msgid "Try `lxc info --show-log %s` for more info"
 msgstr ""
 
-#: lxc/info.go:129
+#: lxc/info.go:132
 msgid "Type: ephemeral"
 msgstr ""
 
-#: lxc/info.go:131
+#: lxc/info.go:134
 msgid "Type: persistent"
 msgstr ""
 
@@ -1276,11 +1321,11 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/remote.go:410
+#: lxc/cluster.go:183 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:695 lxc/storage.go:882
+#: lxc/network.go:548 lxc/profile.go:574 lxc/storage.go:719 lxc/storage.go:917
 msgid "USED BY"
 msgstr ""
 
@@ -1310,6 +1355,25 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
+#: lxc/cluster.go:22
+msgid ""
+"Usage: lxc cluster <subcommand> [options]\n"
+"\n"
+"Manage cluster nodes.\n"
+"\n"
+"lxc cluster list [<remote>:]\n"
+"    List all nodes in the cluster.\n"
+"\n"
+"lxc cluster show [<remote>:]<node>\n"
+"    Show details of a node.\n"
+"\n"
+"lxc cluster rename [<remote>:]<node> <new-name>\n"
+"    Rename a cluster node.\n"
+"\n"
+"lxc cluster delete [<remote>:]<node> [--force]\n"
+"    Delete a node from the cluster."
+msgstr ""
+
 #: lxc/config.go:85
 msgid ""
 "Usage: lxc config <subcommand> [options]\n"
@@ -1626,11 +1690,12 @@ msgid ""
 "    For LXD server information."
 msgstr ""
 
-#: lxc/init.go:77
+#: lxc/init.go:78
 msgid ""
 "Usage: lxc init [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create containers from images.\n"
 "\n"
@@ -1645,7 +1710,8 @@ msgstr ""
 msgid ""
 "Usage: lxc launch [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create and start containers from images.\n"
 "\n"
@@ -1726,6 +1792,8 @@ msgid ""
 "\n"
 "\tt - Type (persistent or ephemeral)\n"
 "\n"
+"\tH - Node hosting the container\n"
+"\n"
 "Custom columns are defined with \"key[:name][:maxWidth]\":\n"
 "\n"
 "\tKEY: The (extended) config key to display\n"
@@ -1791,7 +1859,7 @@ msgid ""
 "    Rename a snapshot."
 msgstr ""
 
-#: lxc/network.go:50
+#: lxc/network.go:52
 msgid ""
 "Usage: lxc network <subcommand> [options]\n"
 "\n"
@@ -1800,13 +1868,13 @@ msgid ""
 "lxc network list [<remote>:]\n"
 "    List available networks.\n"
 "\n"
-"lxc network show [<remote>:]<network>\n"
+"lxc network show [<remote>:]<network> [--target <node>]\n"
 "    Show details of a network.\n"
 "\n"
-"lxc network create [<remote>:]<network> [key=value...]\n"
+"lxc network create [<remote>:]<network> [key=value...] [--target <node>]\n"
 "    Create a network.\n"
 "\n"
-"lxc network get [<remote>:]<network> <key>\n"
+"lxc network get [<remote>:]<network> <key> [--target <node>]\n"
 "    Get network configuration.\n"
 "\n"
 "lxc network set [<remote>:]<network> <key> <value>\n"
@@ -2039,7 +2107,7 @@ msgid ""
 "    Create a snapshot of \"u1\" called \"snap0\"."
 msgstr ""
 
-#: lxc/storage.go:65
+#: lxc/storage.go:66
 msgid ""
 "Usage: lxc storage <subcommand> [options]\n"
 "\n"
@@ -2049,13 +2117,14 @@ msgid ""
 "lxc storage list [<remote>:]\n"
 "    List available storage pools.\n"
 "\n"
-"lxc storage show [<remote>:]<pool> [--resources]\n"
+"lxc storage show [<remote>:]<pool> [--resources] [--target <node>]\n"
 "    Show details of a storage pool.\n"
 "\n"
 "lxc storage info [<remote>:]<pool> [--bytes]\n"
 "    Show information of a storage pool in yaml format.\n"
 "\n"
-"lxc storage create [<remote>:]<pool> <driver> [key=value]...\n"
+"lxc storage create [<remote>:]<pool> <driver> [key=value]... [--target "
+"<node>]\n"
 "    Create a storage pool.\n"
 "\n"
 "lxc storage get [<remote>:]<pool> <key>\n"
@@ -2078,28 +2147,31 @@ msgid ""
 "lxc storage volume list [<remote>:]<pool>\n"
 "    List available storage volumes on a storage pool.\n"
 "\n"
-"lxc storage volume show [<remote>:]<pool> <volume>\n"
-"    Show details of a storage volume on a storage pool.\n"
+"lxc storage volume show [<remote>:]<pool> <volume> [--target <node>]\n"
+"   Show details of a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume create [<remote>:]<pool> <volume> [key=value]...\n"
+"lxc storage volume create [<remote>:]<pool> <volume> [key=value]... [--"
+"target <node>]\n"
 "    Create a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume rename [<remote>:]<pool> <old name> <new name>\n"
+"lxc storage volume rename [<remote>:]<pool> <old name> <new name> [--target "
+"<node>]\n"
 "    Rename a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume get [<remote>:]<pool> <volume> <key>\n"
+"lxc storage volume get [<remote>:]<pool> <volume> <key> [--target <node>]\n"
 "    Get storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume set [<remote>:]<pool> <volume> <key> <value>\n"
+"lxc storage volume set [<remote>:]<pool> <volume> <key> <value> [--target "
+"<node>]\n"
 "    Set storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume unset [<remote>:]<pool> <volume> <key>\n"
+"lxc storage volume unset [<remote>:]<pool> <volume> <key> [--target <node>]\n"
 "    Unset storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume delete [<remote>:]<pool> <volume>\n"
+"lxc storage volume delete [<remote>:]<pool> <volume> [--target <node>]\n"
 "    Delete a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume edit [<remote>:]<pool> <volume>\n"
+"lxc storage volume edit [<remote>:]<pool> <volume> [--target <node>]\n"
 "    Edit storage volume, either by launching external editor or reading "
 "STDIN.\n"
 "\n"
@@ -2149,7 +2221,7 @@ msgstr ""
 msgid "User aborted delete operation."
 msgstr ""
 
-#: lxc/utils.go:371
+#: lxc/utils.go:372
 msgid ""
 "User signaled us three times, exiting. The remote operation will keep "
 "running."
@@ -2169,7 +2241,7 @@ msgstr ""
 msgid "Whether or not to snapshot the container's running state"
 msgstr ""
 
-#: lxc/network.go:510 lxc/operation.go:141 lxc/remote.go:382 lxc/remote.go:387
+#: lxc/network.go:528 lxc/operation.go:141 lxc/remote.go:382 lxc/remote.go:387
 msgid "YES"
 msgstr ""
 
@@ -2201,11 +2273,11 @@ msgstr ""
 msgid "default"
 msgstr ""
 
-#: lxc/storage.go:804
+#: lxc/storage.go:835
 msgid "description"
 msgstr ""
 
-#: lxc/init.go:311
+#: lxc/init.go:314
 msgid "didn't get any affected image, container or snapshot from server"
 msgstr ""
 
@@ -2213,7 +2285,7 @@ msgstr ""
 msgid "disabled"
 msgstr ""
 
-#: lxc/storage.go:803
+#: lxc/storage.go:834
 msgid "driver"
 msgstr ""
 
@@ -2231,11 +2303,11 @@ msgstr ""
 msgid "error: unknown command: %s"
 msgstr ""
 
-#: lxc/storage.go:801
+#: lxc/storage.go:832
 msgid "info"
 msgstr ""
 
-#: lxc/storage.go:802
+#: lxc/storage.go:833
 msgid "name"
 msgstr ""
 
@@ -2247,7 +2319,7 @@ msgstr ""
 msgid "ok (y/n)?"
 msgstr ""
 
-#: lxc/main.go:366 lxc/main.go:370
+#: lxc/main.go:367 lxc/main.go:371
 #, c-format
 msgid "processing aliases failed %s\n"
 msgstr ""
@@ -2276,32 +2348,32 @@ msgstr ""
 msgid "remote %s is static and cannot be modified"
 msgstr ""
 
-#: lxc/storage.go:806
+#: lxc/storage.go:837
 msgid "space used"
 msgstr ""
 
-#: lxc/info.go:248
+#: lxc/info.go:251
 msgid "stateful"
 msgstr ""
 
-#: lxc/info.go:250
+#: lxc/info.go:253
 msgid "stateless"
 msgstr ""
 
-#: lxc/info.go:244
+#: lxc/info.go:247
 #, c-format
 msgid "taken at %s"
 msgstr ""
 
-#: lxc/storage.go:805
+#: lxc/storage.go:836
 msgid "total space"
 msgstr ""
 
-#: lxc/storage.go:800
+#: lxc/storage.go:831
 msgid "used by"
 msgstr ""
 
-#: lxc/main.go:297
+#: lxc/main.go:298
 msgid "wrong number of subcommand arguments"
 msgstr ""
 
diff --git a/po/ru.po b/po/ru.po
index 9bdf106d8..2b81a2d7e 100644
--- a/po/ru.po
+++ b/po/ru.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2018-01-21 21:48+0000\n"
+"POT-Creation-Date: 2018-01-24 13:34+0000\n"
 "PO-Revision-Date: 2017-09-05 16:48+0000\n"
 "Last-Translator: Ilya Yakimavets <ilya.yakimavets at backend.expert>\n"
 "Language-Team: Russian <https://hosted.weblate.org/projects/linux-containers/"
@@ -20,7 +20,7 @@ msgstr ""
 "%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2;\n"
 "X-Generator: Weblate 2.17-dev\n"
 
-#: lxc/storage.go:34
+#: lxc/storage.go:35
 msgid ""
 "### This is a yaml representation of a storage pool.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -50,7 +50,7 @@ msgstr ""
 "###   source: /home/chb/mnt/lxd_test/default.img\n"
 "###   zfs.pool_name: default"
 
-#: lxc/storage.go:51
+#: lxc/storage.go:52
 msgid ""
 "### This is a yaml representation of a storage volume.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -154,7 +154,7 @@ msgid ""
 "###  description: My custom image"
 msgstr ""
 
-#: lxc/network.go:30
+#: lxc/network.go:32
 msgid ""
 "### This is a yaml representation of the network.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -238,7 +238,7 @@ msgstr ""
 msgid "%s is not a directory"
 msgstr ""
 
-#: lxc/utils.go:375
+#: lxc/utils.go:376
 #, c-format
 msgid "%v (interrupt two more times to force)"
 msgstr ""
@@ -265,7 +265,7 @@ msgstr "ПСЕВДОНИМ"
 msgid "ARCH"
 msgstr "ARCH"
 
-#: lxc/list.go:463
+#: lxc/list.go:467
 msgid "ARCHITECTURE"
 msgstr "АРХИТЕКТУРА"
 
@@ -290,7 +290,7 @@ msgstr "Пароль администратора для %s: "
 msgid "Aliases:"
 msgstr "Псевдонимы:"
 
-#: lxc/image.go:600 lxc/info.go:122
+#: lxc/image.go:600 lxc/info.go:125
 #, c-format
 msgid "Architecture: %s"
 msgstr "Архитектура: %s"
@@ -314,11 +314,11 @@ msgstr ""
 msgid "Both --all and container name given"
 msgstr ""
 
-#: lxc/info.go:215
+#: lxc/info.go:218
 msgid "Bytes received"
 msgstr "Получено байтов"
 
-#: lxc/info.go:216
+#: lxc/info.go:219
 msgid "Bytes sent"
 msgstr "Отправлено байтов"
 
@@ -330,11 +330,11 @@ msgstr ""
 msgid "COMMON NAME"
 msgstr "ОБЩЕЕ ИМЯ"
 
-#: lxc/info.go:179
+#: lxc/info.go:182
 msgid "CPU usage (in seconds)"
 msgstr "Использование ЦП (в секундах)"
 
-#: lxc/info.go:183
+#: lxc/info.go:186
 #, fuzzy
 msgid "CPU usage:"
 msgstr " Использование ЦП:"
@@ -344,7 +344,7 @@ msgstr " Использование ЦП:"
 msgid "CREATED"
 msgstr "СОЗДАН"
 
-#: lxc/list.go:464
+#: lxc/list.go:468
 msgid "CREATED AT"
 msgstr "СОЗДАН"
 
@@ -357,7 +357,7 @@ msgstr ""
 msgid "Can't pull a directory without --recursive"
 msgstr ""
 
-#: lxc/config.go:207 lxc/network.go:560
+#: lxc/config.go:207 lxc/network.go:587
 #, c-format
 msgid "Can't read from stdin: %s"
 msgstr "Невозможно прочитать из стандартного ввода: %s"
@@ -376,7 +376,7 @@ msgstr ""
 msgid "Can't unset key '%s', it's not currently set."
 msgstr ""
 
-#: lxc/profile.go:546 lxc/storage.go:663
+#: lxc/profile.go:546 lxc/storage.go:675
 msgid "Cannot provide container name to list"
 msgstr "Невозможно добавить имя контейнера в список"
 
@@ -389,7 +389,7 @@ msgstr ""
 msgid "Client certificate stored at server: "
 msgstr "Сертификат клиента хранится на сервере: "
 
-#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:130 lxc/list.go:131
+#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:134 lxc/list.go:135
 msgid "Columns"
 msgstr "Столбцы"
 
@@ -397,12 +397,12 @@ msgstr "Столбцы"
 msgid "Commands:"
 msgstr ""
 
-#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:137 lxc/init.go:138
+#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:138 lxc/init.go:139
 msgid "Config key/value to apply to the new container"
 msgstr ""
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:614 lxc/storage.go:1067
+#: lxc/network.go:440 lxc/profile.go:275 lxc/storage.go:626 lxc/storage.go:1138
 #, c-format
 msgid "Config parsing error: %s"
 msgstr ""
@@ -419,7 +419,7 @@ msgstr ""
 msgid "Container name is mandatory"
 msgstr "Имя контейнера является обязательным"
 
-#: lxc/copy.go:229 lxc/init.go:317
+#: lxc/copy.go:229 lxc/init.go:320
 #, c-format
 msgid "Container name is: %s"
 msgstr "Имя контейнера: %s"
@@ -454,26 +454,30 @@ msgstr "Не удалось создать каталог сертификата
 msgid "Create any directories necessary"
 msgstr ""
 
-#: lxc/image.go:605 lxc/info.go:124
+#: lxc/image.go:605 lxc/info.go:127
 #, c-format
 msgid "Created: %s"
 msgstr ""
 
-#: lxc/init.go:196
+#: lxc/init.go:199
 #, c-format
 msgid "Creating %s"
 msgstr ""
 
-#: lxc/init.go:194
+#: lxc/init.go:197
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:465 lxc/network.go:525
-#: lxc/storage.go:692 lxc/storage.go:881
+#: lxc/cluster.go:184
+msgid "DATABASE"
+msgstr ""
+
+#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:469 lxc/network.go:547
+#: lxc/storage.go:711 lxc/storage.go:916
 msgid "DESCRIPTION"
 msgstr ""
 
-#: lxc/storage.go:693
+#: lxc/storage.go:712
 msgid "DRIVER"
 msgstr ""
 
@@ -491,7 +495,7 @@ msgstr ""
 msgid "Device %s removed from %s"
 msgstr ""
 
-#: lxc/utils.go:307 lxc/utils.go:331
+#: lxc/utils.go:308 lxc/utils.go:332
 #, c-format
 msgid "Device already exists: %s"
 msgstr ""
@@ -508,12 +512,12 @@ msgstr ""
 msgid "Disable stdin (reads from /dev/null)"
 msgstr ""
 
-#: lxc/info.go:172
+#: lxc/info.go:175
 #, fuzzy
 msgid "Disk usage:"
 msgstr " Использование диска:"
 
-#: lxc/list.go:622
+#: lxc/list.go:638
 msgid "EPHEMERAL"
 msgstr ""
 
@@ -537,7 +541,7 @@ msgstr ""
 msgid "Environment:"
 msgstr ""
 
-#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:141 lxc/init.go:142
+#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:142 lxc/init.go:143
 msgid "Ephemeral container"
 msgstr ""
 
@@ -572,7 +576,7 @@ msgstr ""
 msgid "FINGERPRINT"
 msgstr ""
 
-#: lxc/utils.go:413
+#: lxc/utils.go:414
 #, c-format
 msgid "Failed to create alias %s"
 msgstr ""
@@ -591,7 +595,7 @@ msgstr ""
 msgid "Failed to get the new container name"
 msgstr ""
 
-#: lxc/utils.go:403
+#: lxc/utils.go:404
 #, c-format
 msgid "Failed to remove alias %s"
 msgstr ""
@@ -601,11 +605,11 @@ msgstr ""
 msgid "Failed to walk path for %s: %s"
 msgstr ""
 
-#: lxc/list.go:133
+#: lxc/list.go:137
 msgid "Fast mode (same as --columns=nsacPt)"
 msgstr ""
 
-#: lxc/network.go:486 lxc/operation.go:121
+#: lxc/network.go:504 lxc/operation.go:121
 msgid "Filtering isn't supported yet"
 msgstr ""
 
@@ -618,6 +622,10 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
+#: lxc/cluster.go:41
+msgid "Force removing a node, even if degraded"
+msgstr ""
+
 #: lxc/action.go:47 lxc/action.go:48
 msgid "Force the container to shutdown"
 msgstr ""
@@ -630,7 +638,7 @@ msgstr ""
 msgid "Force using the local unix socket"
 msgstr ""
 
-#: lxc/image.go:180 lxc/list.go:132
+#: lxc/image.go:180 lxc/list.go:136
 msgid "Format (csv|json|table|yaml)"
 msgstr ""
 
@@ -642,11 +650,11 @@ msgstr ""
 msgid "ID"
 msgstr ""
 
-#: lxc/list.go:461
+#: lxc/list.go:465
 msgid "IPV4"
 msgstr ""
 
-#: lxc/list.go:462
+#: lxc/list.go:466
 msgid "IPV6"
 msgstr ""
 
@@ -693,7 +701,7 @@ msgstr ""
 msgid "Input data"
 msgstr ""
 
-#: lxc/init.go:147
+#: lxc/init.go:148
 msgid "Instance type"
 msgstr ""
 
@@ -730,7 +738,7 @@ msgstr ""
 msgid "Invalid target %s"
 msgstr ""
 
-#: lxc/info.go:153
+#: lxc/info.go:156
 msgid "Ips:"
 msgstr ""
 
@@ -738,7 +746,7 @@ msgstr ""
 msgid "Keep the image up to date after initial copy"
 msgstr ""
 
-#: lxc/list.go:466
+#: lxc/list.go:470
 msgid "LAST USED AT"
 msgstr ""
 
@@ -755,14 +763,18 @@ msgstr ""
 msgid "Last used: never"
 msgstr ""
 
-#: lxc/info.go:268
+#: lxc/info.go:271
 msgid "Log:"
 msgstr ""
 
-#: lxc/network.go:524
+#: lxc/network.go:546
 msgid "MANAGED"
 msgstr ""
 
+#: lxc/cluster.go:186
+msgid "MESSAGE"
+msgstr ""
+
 #: lxc/image.go:176
 msgid "Make image public"
 msgstr ""
@@ -771,24 +783,24 @@ msgstr ""
 msgid "Make the image public"
 msgstr ""
 
-#: lxc/info.go:190
+#: lxc/info.go:193
 msgid "Memory (current)"
 msgstr ""
 
-#: lxc/info.go:194
+#: lxc/info.go:197
 msgid "Memory (peak)"
 msgstr ""
 
-#: lxc/info.go:206
+#: lxc/info.go:209
 #, fuzzy
 msgid "Memory usage:"
 msgstr " Использование памяти:"
 
-#: lxc/utils.go:258
+#: lxc/utils.go:259
 msgid "Missing summary."
 msgstr ""
 
-#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:412 lxc/storage.go:532
+#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:414 lxc/storage.go:544
 msgid "More than one device matches, specify the device name."
 msgstr ""
 
@@ -808,40 +820,49 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/list.go:467 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409
-#: lxc/storage.go:691 lxc/storage.go:880
+#: lxc/cluster.go:182 lxc/list.go:471 lxc/network.go:544 lxc/profile.go:573
+#: lxc/remote.go:409 lxc/storage.go:710 lxc/storage.go:915
 msgid "NAME"
 msgstr ""
 
-#: lxc/network.go:508 lxc/operation.go:139 lxc/remote.go:380 lxc/remote.go:385
+#: lxc/network.go:526 lxc/operation.go:139 lxc/remote.go:380 lxc/remote.go:385
 msgid "NO"
 msgstr ""
 
+#: lxc/list.go:492 lxc/storage.go:920
+msgid "NODE"
+msgstr ""
+
 #: lxc/info.go:117
 #, c-format
 msgid "Name: %s"
 msgstr ""
 
-#: lxc/network.go:258
+#: lxc/network.go:271
 #, c-format
 msgid "Network %s created"
 msgstr ""
 
-#: lxc/network.go:374
+#: lxc/network.go:388
 #, c-format
 msgid "Network %s deleted"
 msgstr ""
 
-#: lxc/network.go:451
+#: lxc/network.go:269
+#, c-format
+msgid "Network %s pending on node %s"
+msgstr ""
+
+#: lxc/network.go:465
 #, c-format
 msgid "Network %s renamed to %s"
 msgstr ""
 
-#: lxc/init.go:143 lxc/init.go:144
+#: lxc/init.go:144 lxc/init.go:145
 msgid "Network name"
 msgstr ""
 
-#: lxc/info.go:223
+#: lxc/info.go:226
 #, fuzzy
 msgid "Network usage:"
 msgstr " Использование сети:"
@@ -854,11 +875,11 @@ msgstr ""
 msgid "No certificate provided to add"
 msgstr ""
 
-#: lxc/network.go:293 lxc/network.go:346
+#: lxc/network.go:307 lxc/network.go:360
 msgid "No device found for this network"
 msgstr ""
 
-#: lxc/storage.go:421 lxc/storage.go:541
+#: lxc/storage.go:423 lxc/storage.go:553
 msgid "No device found for this storage volume."
 msgstr ""
 
@@ -866,7 +887,26 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/storage.go:365 lxc/storage.go:458
+#: lxc/cluster.go:142
+#, c-format
+msgid "Node %s removed"
+msgstr ""
+
+#: lxc/cluster.go:118
+#, c-format
+msgid "Node %s renamed to %s"
+msgstr ""
+
+#: lxc/init.go:149 lxc/network.go:102 lxc/storage.go:158
+msgid "Node name"
+msgstr ""
+
+#: lxc/info.go:119
+#, fuzzy, c-format
+msgid "Node: %s"
+msgstr "Авто-обновление: %s"
+
+#: lxc/storage.go:367 lxc/storage.go:460
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
 
@@ -878,7 +918,7 @@ msgstr ""
 msgid "Only https:// is supported for remote image import."
 msgstr ""
 
-#: lxc/network.go:402 lxc/network.go:546
+#: lxc/network.go:416 lxc/network.go:573
 msgid "Only managed networks can be modified."
 msgstr ""
 
@@ -895,19 +935,19 @@ msgstr ""
 msgid "Override the terminal mode (auto, interactive or non-interactive)"
 msgstr ""
 
-#: lxc/list.go:624
+#: lxc/list.go:640
 msgid "PERSISTENT"
 msgstr ""
 
-#: lxc/list.go:469
+#: lxc/list.go:473
 msgid "PID"
 msgstr ""
 
-#: lxc/list.go:468
+#: lxc/list.go:472
 msgid "PROCESSES"
 msgstr ""
 
-#: lxc/list.go:470
+#: lxc/list.go:474
 msgid "PROFILES"
 msgstr ""
 
@@ -919,11 +959,11 @@ msgstr ""
 msgid "PUBLIC"
 msgstr ""
 
-#: lxc/info.go:217
+#: lxc/info.go:220
 msgid "Packets received"
 msgstr ""
 
-#: lxc/info.go:218
+#: lxc/info.go:221
 msgid "Packets sent"
 msgstr ""
 
@@ -935,7 +975,7 @@ msgstr ""
 msgid "Path to an alternate server directory"
 msgstr ""
 
-#: lxc/main.go:238
+#: lxc/main.go:239
 msgid "Pause containers."
 msgstr ""
 
@@ -943,12 +983,12 @@ msgstr ""
 msgid "Permission denied, are you in the lxd group?"
 msgstr ""
 
-#: lxc/info.go:135
+#: lxc/info.go:138
 #, c-format
 msgid "Pid: %d"
 msgstr ""
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:615 lxc/storage.go:1068
+#: lxc/network.go:441 lxc/profile.go:276 lxc/storage.go:627 lxc/storage.go:1139
 msgid "Press enter to open the editor again"
 msgstr ""
 
@@ -973,7 +1013,7 @@ msgstr ""
 msgid "Print verbose information"
 msgstr ""
 
-#: lxc/info.go:159
+#: lxc/info.go:162
 #, c-format
 msgid "Processes: %d"
 msgstr ""
@@ -1003,7 +1043,7 @@ msgstr ""
 msgid "Profile %s renamed to %s"
 msgstr ""
 
-#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:139 lxc/init.go:140
+#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:140 lxc/init.go:141
 msgid "Profile to apply to the new container"
 msgstr ""
 
@@ -1012,7 +1052,7 @@ msgstr ""
 msgid "Profiles %s applied to %s"
 msgstr ""
 
-#: lxc/info.go:133
+#: lxc/info.go:136
 #, c-format
 msgid "Profiles: %s"
 msgstr ""
@@ -1043,11 +1083,11 @@ msgstr "Копирование образа: %s"
 msgid "Remote admin password"
 msgstr ""
 
-#: lxc/utils.go:366
+#: lxc/utils.go:367
 msgid "Remote operation canceled by user"
 msgstr ""
 
-#: lxc/info.go:119
+#: lxc/info.go:122
 #, c-format
 msgid "Remote: %s"
 msgstr ""
@@ -1057,7 +1097,7 @@ msgstr ""
 msgid "Remove %s (yes/no): "
 msgstr ""
 
-#: lxc/storage.go:1099
+#: lxc/storage.go:1176
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr ""
@@ -1066,11 +1106,11 @@ msgstr ""
 msgid "Require user confirmation"
 msgstr ""
 
-#: lxc/info.go:156
+#: lxc/info.go:159
 msgid "Resources:"
 msgstr ""
 
-#: lxc/main.go:246
+#: lxc/main.go:247
 msgid "Restart containers."
 msgstr ""
 
@@ -1078,7 +1118,7 @@ msgstr ""
 msgid "Retrieve the container's console log"
 msgstr ""
 
-#: lxc/init.go:289
+#: lxc/init.go:292
 #, c-format
 msgid "Retrieving image: %s"
 msgstr ""
@@ -1091,15 +1131,15 @@ msgstr ""
 msgid "SIZE"
 msgstr ""
 
-#: lxc/list.go:471
+#: lxc/list.go:475
 msgid "SNAPSHOTS"
 msgstr ""
 
-#: lxc/storage.go:694
+#: lxc/storage.go:717
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/list.go:472
+#: lxc/cluster.go:185 lxc/list.go:476 lxc/network.go:551 lxc/storage.go:715
 msgid "STATE"
 msgstr ""
 
@@ -1111,7 +1151,7 @@ msgstr ""
 msgid "STATUS"
 msgstr ""
 
-#: lxc/list.go:474
+#: lxc/list.go:478
 msgid "STORAGE POOL"
 msgstr ""
 
@@ -1163,11 +1203,11 @@ msgstr ""
 msgid "Show the resources available to the server"
 msgstr ""
 
-#: lxc/storage.go:155
+#: lxc/storage.go:156
 msgid "Show the resources available to the storage pool"
 msgstr ""
 
-#: lxc/storage.go:156
+#: lxc/storage.go:157
 msgid "Show the used and free space in bytes"
 msgstr ""
 
@@ -1176,7 +1216,7 @@ msgstr ""
 msgid "Size: %.2fMB"
 msgstr ""
 
-#: lxc/info.go:237
+#: lxc/info.go:240
 msgid "Snapshots:"
 msgstr ""
 
@@ -1189,7 +1229,7 @@ msgstr "Невозможно добавить имя контейнера в с
 msgid "Source:"
 msgstr ""
 
-#: lxc/main.go:256
+#: lxc/main.go:257
 msgid "Start containers."
 msgstr ""
 
@@ -1198,12 +1238,12 @@ msgstr ""
 msgid "Starting %s"
 msgstr ""
 
-#: lxc/info.go:127
+#: lxc/info.go:130
 #, c-format
 msgid "Status: %s"
 msgstr ""
 
-#: lxc/main.go:262
+#: lxc/main.go:263
 msgid "Stop containers."
 msgstr ""
 
@@ -1220,26 +1260,31 @@ msgstr ""
 msgid "Stopping the container failed: %s"
 msgstr "Невозможно добавить имя контейнера в список"
 
-#: lxc/storage.go:506
+#: lxc/storage.go:517
 #, c-format
 msgid "Storage pool %s created"
 msgstr ""
 
-#: lxc/storage.go:565
+#: lxc/storage.go:577
 #, c-format
 msgid "Storage pool %s deleted"
 msgstr ""
 
-#: lxc/init.go:145 lxc/init.go:146
+#: lxc/storage.go:515
+#, c-format
+msgid "Storage pool %s pending on node %s"
+msgstr ""
+
+#: lxc/init.go:146 lxc/init.go:147
 msgid "Storage pool name"
 msgstr ""
 
-#: lxc/storage.go:914
+#: lxc/storage.go:959
 #, c-format
 msgid "Storage volume %s created"
 msgstr ""
 
-#: lxc/storage.go:929
+#: lxc/storage.go:979
 #, c-format
 msgid "Storage volume %s deleted"
 msgstr ""
@@ -1248,15 +1293,15 @@ msgstr ""
 msgid "Store the container state (only for stop)"
 msgstr ""
 
-#: lxc/info.go:198
+#: lxc/info.go:201
 msgid "Swap (current)"
 msgstr ""
 
-#: lxc/info.go:202
+#: lxc/info.go:205
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:473 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:879
+#: lxc/list.go:477 lxc/network.go:545 lxc/operation.go:153 lxc/storage.go:914
 msgid "TYPE"
 msgstr ""
 
@@ -1270,7 +1315,7 @@ msgid ""
 "restarted."
 msgstr ""
 
-#: lxc/init.go:362
+#: lxc/init.go:365
 msgid "The container you are starting doesn't have any network attached to it."
 msgstr ""
 
@@ -1283,7 +1328,7 @@ msgstr ""
 msgid "The device doesn't exist"
 msgstr ""
 
-#: lxc/init.go:346
+#: lxc/init.go:349
 #, c-format
 msgid "The local image '%s' couldn't be found, trying '%s:' instead."
 msgstr ""
@@ -1292,11 +1337,11 @@ msgstr ""
 msgid "The opposite of \"lxc pause\" is \"lxc start\"."
 msgstr ""
 
-#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:426 lxc/storage.go:546
+#: lxc/network.go:312 lxc/network.go:365 lxc/storage.go:428 lxc/storage.go:558
 msgid "The specified device doesn't exist"
 msgstr ""
 
-#: lxc/network.go:302 lxc/network.go:355
+#: lxc/network.go:316 lxc/network.go:369
 msgid "The specified device doesn't match the network"
 msgstr ""
 
@@ -1320,11 +1365,11 @@ msgstr ""
 msgid "Timestamps:"
 msgstr ""
 
-#: lxc/init.go:364
+#: lxc/init.go:367
 msgid "To attach a network to a container, use: lxc network attach"
 msgstr ""
 
-#: lxc/init.go:363
+#: lxc/init.go:366
 msgid "To create a new network, use: lxc network create"
 msgstr ""
 
@@ -1355,11 +1400,11 @@ msgstr ""
 msgid "Try `lxc info --show-log %s` for more info"
 msgstr ""
 
-#: lxc/info.go:129
+#: lxc/info.go:132
 msgid "Type: ephemeral"
 msgstr ""
 
-#: lxc/info.go:131
+#: lxc/info.go:134
 msgid "Type: persistent"
 msgstr ""
 
@@ -1367,11 +1412,11 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/remote.go:410
+#: lxc/cluster.go:183 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:695 lxc/storage.go:882
+#: lxc/network.go:548 lxc/profile.go:574 lxc/storage.go:719 lxc/storage.go:917
 msgid "USED BY"
 msgstr ""
 
@@ -1404,6 +1449,25 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
+#: lxc/cluster.go:22
+msgid ""
+"Usage: lxc cluster <subcommand> [options]\n"
+"\n"
+"Manage cluster nodes.\n"
+"\n"
+"lxc cluster list [<remote>:]\n"
+"    List all nodes in the cluster.\n"
+"\n"
+"lxc cluster show [<remote>:]<node>\n"
+"    Show details of a node.\n"
+"\n"
+"lxc cluster rename [<remote>:]<node> <new-name>\n"
+"    Rename a cluster node.\n"
+"\n"
+"lxc cluster delete [<remote>:]<node> [--force]\n"
+"    Delete a node from the cluster."
+msgstr ""
+
 #: lxc/config.go:85
 msgid ""
 "Usage: lxc config <subcommand> [options]\n"
@@ -1725,11 +1789,12 @@ msgid ""
 "    For LXD server information."
 msgstr ""
 
-#: lxc/init.go:77
+#: lxc/init.go:78
 msgid ""
 "Usage: lxc init [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create containers from images.\n"
 "\n"
@@ -1744,7 +1809,8 @@ msgstr ""
 msgid ""
 "Usage: lxc launch [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create and start containers from images.\n"
 "\n"
@@ -1825,6 +1891,8 @@ msgid ""
 "\n"
 "\tt - Type (persistent or ephemeral)\n"
 "\n"
+"\tH - Node hosting the container\n"
+"\n"
 "Custom columns are defined with \"key[:name][:maxWidth]\":\n"
 "\n"
 "\tKEY: The (extended) config key to display\n"
@@ -1890,7 +1958,7 @@ msgid ""
 "    Rename a snapshot."
 msgstr ""
 
-#: lxc/network.go:50
+#: lxc/network.go:52
 msgid ""
 "Usage: lxc network <subcommand> [options]\n"
 "\n"
@@ -1899,13 +1967,13 @@ msgid ""
 "lxc network list [<remote>:]\n"
 "    List available networks.\n"
 "\n"
-"lxc network show [<remote>:]<network>\n"
+"lxc network show [<remote>:]<network> [--target <node>]\n"
 "    Show details of a network.\n"
 "\n"
-"lxc network create [<remote>:]<network> [key=value...]\n"
+"lxc network create [<remote>:]<network> [key=value...] [--target <node>]\n"
 "    Create a network.\n"
 "\n"
-"lxc network get [<remote>:]<network> <key>\n"
+"lxc network get [<remote>:]<network> <key> [--target <node>]\n"
 "    Get network configuration.\n"
 "\n"
 "lxc network set [<remote>:]<network> <key> <value>\n"
@@ -2142,7 +2210,7 @@ msgid ""
 "    Create a snapshot of \"u1\" called \"snap0\"."
 msgstr ""
 
-#: lxc/storage.go:65
+#: lxc/storage.go:66
 msgid ""
 "Usage: lxc storage <subcommand> [options]\n"
 "\n"
@@ -2152,13 +2220,14 @@ msgid ""
 "lxc storage list [<remote>:]\n"
 "    List available storage pools.\n"
 "\n"
-"lxc storage show [<remote>:]<pool> [--resources]\n"
+"lxc storage show [<remote>:]<pool> [--resources] [--target <node>]\n"
 "    Show details of a storage pool.\n"
 "\n"
 "lxc storage info [<remote>:]<pool> [--bytes]\n"
 "    Show information of a storage pool in yaml format.\n"
 "\n"
-"lxc storage create [<remote>:]<pool> <driver> [key=value]...\n"
+"lxc storage create [<remote>:]<pool> <driver> [key=value]... [--target "
+"<node>]\n"
 "    Create a storage pool.\n"
 "\n"
 "lxc storage get [<remote>:]<pool> <key>\n"
@@ -2181,28 +2250,31 @@ msgid ""
 "lxc storage volume list [<remote>:]<pool>\n"
 "    List available storage volumes on a storage pool.\n"
 "\n"
-"lxc storage volume show [<remote>:]<pool> <volume>\n"
-"    Show details of a storage volume on a storage pool.\n"
+"lxc storage volume show [<remote>:]<pool> <volume> [--target <node>]\n"
+"   Show details of a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume create [<remote>:]<pool> <volume> [key=value]...\n"
+"lxc storage volume create [<remote>:]<pool> <volume> [key=value]... [--"
+"target <node>]\n"
 "    Create a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume rename [<remote>:]<pool> <old name> <new name>\n"
+"lxc storage volume rename [<remote>:]<pool> <old name> <new name> [--target "
+"<node>]\n"
 "    Rename a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume get [<remote>:]<pool> <volume> <key>\n"
+"lxc storage volume get [<remote>:]<pool> <volume> <key> [--target <node>]\n"
 "    Get storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume set [<remote>:]<pool> <volume> <key> <value>\n"
+"lxc storage volume set [<remote>:]<pool> <volume> <key> <value> [--target "
+"<node>]\n"
 "    Set storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume unset [<remote>:]<pool> <volume> <key>\n"
+"lxc storage volume unset [<remote>:]<pool> <volume> <key> [--target <node>]\n"
 "    Unset storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume delete [<remote>:]<pool> <volume>\n"
+"lxc storage volume delete [<remote>:]<pool> <volume> [--target <node>]\n"
 "    Delete a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume edit [<remote>:]<pool> <volume>\n"
+"lxc storage volume edit [<remote>:]<pool> <volume> [--target <node>]\n"
 "    Edit storage volume, either by launching external editor or reading "
 "STDIN.\n"
 "\n"
@@ -2252,7 +2324,7 @@ msgstr ""
 msgid "User aborted delete operation."
 msgstr ""
 
-#: lxc/utils.go:371
+#: lxc/utils.go:372
 msgid ""
 "User signaled us three times, exiting. The remote operation will keep "
 "running."
@@ -2272,7 +2344,7 @@ msgstr ""
 msgid "Whether or not to snapshot the container's running state"
 msgstr ""
 
-#: lxc/network.go:510 lxc/operation.go:141 lxc/remote.go:382 lxc/remote.go:387
+#: lxc/network.go:528 lxc/operation.go:141 lxc/remote.go:382 lxc/remote.go:387
 msgid "YES"
 msgstr ""
 
@@ -2304,11 +2376,11 @@ msgstr ""
 msgid "default"
 msgstr ""
 
-#: lxc/storage.go:804
+#: lxc/storage.go:835
 msgid "description"
 msgstr ""
 
-#: lxc/init.go:311
+#: lxc/init.go:314
 msgid "didn't get any affected image, container or snapshot from server"
 msgstr ""
 
@@ -2316,7 +2388,7 @@ msgstr ""
 msgid "disabled"
 msgstr ""
 
-#: lxc/storage.go:803
+#: lxc/storage.go:834
 msgid "driver"
 msgstr ""
 
@@ -2334,11 +2406,11 @@ msgstr ""
 msgid "error: unknown command: %s"
 msgstr ""
 
-#: lxc/storage.go:801
+#: lxc/storage.go:832
 msgid "info"
 msgstr ""
 
-#: lxc/storage.go:802
+#: lxc/storage.go:833
 msgid "name"
 msgstr ""
 
@@ -2350,7 +2422,7 @@ msgstr ""
 msgid "ok (y/n)?"
 msgstr ""
 
-#: lxc/main.go:366 lxc/main.go:370
+#: lxc/main.go:367 lxc/main.go:371
 #, c-format
 msgid "processing aliases failed %s\n"
 msgstr ""
@@ -2379,32 +2451,32 @@ msgstr ""
 msgid "remote %s is static and cannot be modified"
 msgstr ""
 
-#: lxc/storage.go:806
+#: lxc/storage.go:837
 msgid "space used"
 msgstr ""
 
-#: lxc/info.go:248
+#: lxc/info.go:251
 msgid "stateful"
 msgstr ""
 
-#: lxc/info.go:250
+#: lxc/info.go:253
 msgid "stateless"
 msgstr ""
 
-#: lxc/info.go:244
+#: lxc/info.go:247
 #, c-format
 msgid "taken at %s"
 msgstr ""
 
-#: lxc/storage.go:805
+#: lxc/storage.go:836
 msgid "total space"
 msgstr ""
 
-#: lxc/storage.go:800
+#: lxc/storage.go:831
 msgid "used by"
 msgstr ""
 
-#: lxc/main.go:297
+#: lxc/main.go:298
 msgid "wrong number of subcommand arguments"
 msgstr ""
 
diff --git a/po/sr.po b/po/sr.po
index e398da942..17764c13c 100644
--- a/po/sr.po
+++ b/po/sr.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2018-01-21 21:48+0000\n"
+"POT-Creation-Date: 2018-01-24 13:34+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -16,7 +16,7 @@ msgstr ""
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
 
-#: lxc/storage.go:34
+#: lxc/storage.go:35
 msgid ""
 "### This is a yaml representation of a storage pool.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -33,7 +33,7 @@ msgid ""
 "###   zfs.pool_name: default"
 msgstr ""
 
-#: lxc/storage.go:51
+#: lxc/storage.go:52
 msgid ""
 "### This is a yaml representation of a storage volume.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -102,7 +102,7 @@ msgid ""
 "###  description: My custom image"
 msgstr ""
 
-#: lxc/network.go:30
+#: lxc/network.go:32
 msgid ""
 "### This is a yaml representation of the network.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -153,7 +153,7 @@ msgstr ""
 msgid "%s is not a directory"
 msgstr ""
 
-#: lxc/utils.go:375
+#: lxc/utils.go:376
 #, c-format
 msgid "%v (interrupt two more times to force)"
 msgstr ""
@@ -179,7 +179,7 @@ msgstr ""
 msgid "ARCH"
 msgstr ""
 
-#: lxc/list.go:463
+#: lxc/list.go:467
 msgid "ARCHITECTURE"
 msgstr ""
 
@@ -204,7 +204,7 @@ msgstr ""
 msgid "Aliases:"
 msgstr ""
 
-#: lxc/image.go:600 lxc/info.go:122
+#: lxc/image.go:600 lxc/info.go:125
 #, c-format
 msgid "Architecture: %s"
 msgstr ""
@@ -228,11 +228,11 @@ msgstr ""
 msgid "Both --all and container name given"
 msgstr ""
 
-#: lxc/info.go:215
+#: lxc/info.go:218
 msgid "Bytes received"
 msgstr ""
 
-#: lxc/info.go:216
+#: lxc/info.go:219
 msgid "Bytes sent"
 msgstr ""
 
@@ -244,11 +244,11 @@ msgstr ""
 msgid "COMMON NAME"
 msgstr ""
 
-#: lxc/info.go:179
+#: lxc/info.go:182
 msgid "CPU usage (in seconds)"
 msgstr ""
 
-#: lxc/info.go:183
+#: lxc/info.go:186
 msgid "CPU usage:"
 msgstr ""
 
@@ -256,7 +256,7 @@ msgstr ""
 msgid "CREATED"
 msgstr ""
 
-#: lxc/list.go:464
+#: lxc/list.go:468
 msgid "CREATED AT"
 msgstr ""
 
@@ -269,7 +269,7 @@ msgstr ""
 msgid "Can't pull a directory without --recursive"
 msgstr ""
 
-#: lxc/config.go:207 lxc/network.go:560
+#: lxc/config.go:207 lxc/network.go:587
 #, c-format
 msgid "Can't read from stdin: %s"
 msgstr ""
@@ -288,7 +288,7 @@ msgstr ""
 msgid "Can't unset key '%s', it's not currently set."
 msgstr ""
 
-#: lxc/profile.go:546 lxc/storage.go:663
+#: lxc/profile.go:546 lxc/storage.go:675
 msgid "Cannot provide container name to list"
 msgstr ""
 
@@ -301,7 +301,7 @@ msgstr ""
 msgid "Client certificate stored at server: "
 msgstr ""
 
-#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:130 lxc/list.go:131
+#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:134 lxc/list.go:135
 msgid "Columns"
 msgstr ""
 
@@ -309,12 +309,12 @@ msgstr ""
 msgid "Commands:"
 msgstr ""
 
-#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:137 lxc/init.go:138
+#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:138 lxc/init.go:139
 msgid "Config key/value to apply to the new container"
 msgstr ""
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:614 lxc/storage.go:1067
+#: lxc/network.go:440 lxc/profile.go:275 lxc/storage.go:626 lxc/storage.go:1138
 #, c-format
 msgid "Config parsing error: %s"
 msgstr ""
@@ -331,7 +331,7 @@ msgstr ""
 msgid "Container name is mandatory"
 msgstr ""
 
-#: lxc/copy.go:229 lxc/init.go:317
+#: lxc/copy.go:229 lxc/init.go:320
 #, c-format
 msgid "Container name is: %s"
 msgstr ""
@@ -366,26 +366,30 @@ msgstr ""
 msgid "Create any directories necessary"
 msgstr ""
 
-#: lxc/image.go:605 lxc/info.go:124
+#: lxc/image.go:605 lxc/info.go:127
 #, c-format
 msgid "Created: %s"
 msgstr ""
 
-#: lxc/init.go:196
+#: lxc/init.go:199
 #, c-format
 msgid "Creating %s"
 msgstr ""
 
-#: lxc/init.go:194
+#: lxc/init.go:197
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:465 lxc/network.go:525
-#: lxc/storage.go:692 lxc/storage.go:881
+#: lxc/cluster.go:184
+msgid "DATABASE"
+msgstr ""
+
+#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:469 lxc/network.go:547
+#: lxc/storage.go:711 lxc/storage.go:916
 msgid "DESCRIPTION"
 msgstr ""
 
-#: lxc/storage.go:693
+#: lxc/storage.go:712
 msgid "DRIVER"
 msgstr ""
 
@@ -403,7 +407,7 @@ msgstr ""
 msgid "Device %s removed from %s"
 msgstr ""
 
-#: lxc/utils.go:307 lxc/utils.go:331
+#: lxc/utils.go:308 lxc/utils.go:332
 #, c-format
 msgid "Device already exists: %s"
 msgstr ""
@@ -420,11 +424,11 @@ msgstr ""
 msgid "Disable stdin (reads from /dev/null)"
 msgstr ""
 
-#: lxc/info.go:172
+#: lxc/info.go:175
 msgid "Disk usage:"
 msgstr ""
 
-#: lxc/list.go:622
+#: lxc/list.go:638
 msgid "EPHEMERAL"
 msgstr ""
 
@@ -448,7 +452,7 @@ msgstr ""
 msgid "Environment:"
 msgstr ""
 
-#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:141 lxc/init.go:142
+#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:142 lxc/init.go:143
 msgid "Ephemeral container"
 msgstr ""
 
@@ -483,7 +487,7 @@ msgstr ""
 msgid "FINGERPRINT"
 msgstr ""
 
-#: lxc/utils.go:413
+#: lxc/utils.go:414
 #, c-format
 msgid "Failed to create alias %s"
 msgstr ""
@@ -502,7 +506,7 @@ msgstr ""
 msgid "Failed to get the new container name"
 msgstr ""
 
-#: lxc/utils.go:403
+#: lxc/utils.go:404
 #, c-format
 msgid "Failed to remove alias %s"
 msgstr ""
@@ -512,11 +516,11 @@ msgstr ""
 msgid "Failed to walk path for %s: %s"
 msgstr ""
 
-#: lxc/list.go:133
+#: lxc/list.go:137
 msgid "Fast mode (same as --columns=nsacPt)"
 msgstr ""
 
-#: lxc/network.go:486 lxc/operation.go:121
+#: lxc/network.go:504 lxc/operation.go:121
 msgid "Filtering isn't supported yet"
 msgstr ""
 
@@ -529,6 +533,10 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
+#: lxc/cluster.go:41
+msgid "Force removing a node, even if degraded"
+msgstr ""
+
 #: lxc/action.go:47 lxc/action.go:48
 msgid "Force the container to shutdown"
 msgstr ""
@@ -541,7 +549,7 @@ msgstr ""
 msgid "Force using the local unix socket"
 msgstr ""
 
-#: lxc/image.go:180 lxc/list.go:132
+#: lxc/image.go:180 lxc/list.go:136
 msgid "Format (csv|json|table|yaml)"
 msgstr ""
 
@@ -553,11 +561,11 @@ msgstr ""
 msgid "ID"
 msgstr ""
 
-#: lxc/list.go:461
+#: lxc/list.go:465
 msgid "IPV4"
 msgstr ""
 
-#: lxc/list.go:462
+#: lxc/list.go:466
 msgid "IPV6"
 msgstr ""
 
@@ -604,7 +612,7 @@ msgstr ""
 msgid "Input data"
 msgstr ""
 
-#: lxc/init.go:147
+#: lxc/init.go:148
 msgid "Instance type"
 msgstr ""
 
@@ -641,7 +649,7 @@ msgstr ""
 msgid "Invalid target %s"
 msgstr ""
 
-#: lxc/info.go:153
+#: lxc/info.go:156
 msgid "Ips:"
 msgstr ""
 
@@ -649,7 +657,7 @@ msgstr ""
 msgid "Keep the image up to date after initial copy"
 msgstr ""
 
-#: lxc/list.go:466
+#: lxc/list.go:470
 msgid "LAST USED AT"
 msgstr ""
 
@@ -666,14 +674,18 @@ msgstr ""
 msgid "Last used: never"
 msgstr ""
 
-#: lxc/info.go:268
+#: lxc/info.go:271
 msgid "Log:"
 msgstr ""
 
-#: lxc/network.go:524
+#: lxc/network.go:546
 msgid "MANAGED"
 msgstr ""
 
+#: lxc/cluster.go:186
+msgid "MESSAGE"
+msgstr ""
+
 #: lxc/image.go:176
 msgid "Make image public"
 msgstr ""
@@ -682,23 +694,23 @@ msgstr ""
 msgid "Make the image public"
 msgstr ""
 
-#: lxc/info.go:190
+#: lxc/info.go:193
 msgid "Memory (current)"
 msgstr ""
 
-#: lxc/info.go:194
+#: lxc/info.go:197
 msgid "Memory (peak)"
 msgstr ""
 
-#: lxc/info.go:206
+#: lxc/info.go:209
 msgid "Memory usage:"
 msgstr ""
 
-#: lxc/utils.go:258
+#: lxc/utils.go:259
 msgid "Missing summary."
 msgstr ""
 
-#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:412 lxc/storage.go:532
+#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:414 lxc/storage.go:544
 msgid "More than one device matches, specify the device name."
 msgstr ""
 
@@ -718,40 +730,49 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/list.go:467 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409
-#: lxc/storage.go:691 lxc/storage.go:880
+#: lxc/cluster.go:182 lxc/list.go:471 lxc/network.go:544 lxc/profile.go:573
+#: lxc/remote.go:409 lxc/storage.go:710 lxc/storage.go:915
 msgid "NAME"
 msgstr ""
 
-#: lxc/network.go:508 lxc/operation.go:139 lxc/remote.go:380 lxc/remote.go:385
+#: lxc/network.go:526 lxc/operation.go:139 lxc/remote.go:380 lxc/remote.go:385
 msgid "NO"
 msgstr ""
 
+#: lxc/list.go:492 lxc/storage.go:920
+msgid "NODE"
+msgstr ""
+
 #: lxc/info.go:117
 #, c-format
 msgid "Name: %s"
 msgstr ""
 
-#: lxc/network.go:258
+#: lxc/network.go:271
 #, c-format
 msgid "Network %s created"
 msgstr ""
 
-#: lxc/network.go:374
+#: lxc/network.go:388
 #, c-format
 msgid "Network %s deleted"
 msgstr ""
 
-#: lxc/network.go:451
+#: lxc/network.go:269
+#, c-format
+msgid "Network %s pending on node %s"
+msgstr ""
+
+#: lxc/network.go:465
 #, c-format
 msgid "Network %s renamed to %s"
 msgstr ""
 
-#: lxc/init.go:143 lxc/init.go:144
+#: lxc/init.go:144 lxc/init.go:145
 msgid "Network name"
 msgstr ""
 
-#: lxc/info.go:223
+#: lxc/info.go:226
 msgid "Network usage:"
 msgstr ""
 
@@ -763,11 +784,11 @@ msgstr ""
 msgid "No certificate provided to add"
 msgstr ""
 
-#: lxc/network.go:293 lxc/network.go:346
+#: lxc/network.go:307 lxc/network.go:360
 msgid "No device found for this network"
 msgstr ""
 
-#: lxc/storage.go:421 lxc/storage.go:541
+#: lxc/storage.go:423 lxc/storage.go:553
 msgid "No device found for this storage volume."
 msgstr ""
 
@@ -775,7 +796,26 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/storage.go:365 lxc/storage.go:458
+#: lxc/cluster.go:142
+#, c-format
+msgid "Node %s removed"
+msgstr ""
+
+#: lxc/cluster.go:118
+#, c-format
+msgid "Node %s renamed to %s"
+msgstr ""
+
+#: lxc/init.go:149 lxc/network.go:102 lxc/storage.go:158
+msgid "Node name"
+msgstr ""
+
+#: lxc/info.go:119
+#, c-format
+msgid "Node: %s"
+msgstr ""
+
+#: lxc/storage.go:367 lxc/storage.go:460
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
 
@@ -787,7 +827,7 @@ msgstr ""
 msgid "Only https:// is supported for remote image import."
 msgstr ""
 
-#: lxc/network.go:402 lxc/network.go:546
+#: lxc/network.go:416 lxc/network.go:573
 msgid "Only managed networks can be modified."
 msgstr ""
 
@@ -804,19 +844,19 @@ msgstr ""
 msgid "Override the terminal mode (auto, interactive or non-interactive)"
 msgstr ""
 
-#: lxc/list.go:624
+#: lxc/list.go:640
 msgid "PERSISTENT"
 msgstr ""
 
-#: lxc/list.go:469
+#: lxc/list.go:473
 msgid "PID"
 msgstr ""
 
-#: lxc/list.go:468
+#: lxc/list.go:472
 msgid "PROCESSES"
 msgstr ""
 
-#: lxc/list.go:470
+#: lxc/list.go:474
 msgid "PROFILES"
 msgstr ""
 
@@ -828,11 +868,11 @@ msgstr ""
 msgid "PUBLIC"
 msgstr ""
 
-#: lxc/info.go:217
+#: lxc/info.go:220
 msgid "Packets received"
 msgstr ""
 
-#: lxc/info.go:218
+#: lxc/info.go:221
 msgid "Packets sent"
 msgstr ""
 
@@ -844,7 +884,7 @@ msgstr ""
 msgid "Path to an alternate server directory"
 msgstr ""
 
-#: lxc/main.go:238
+#: lxc/main.go:239
 msgid "Pause containers."
 msgstr ""
 
@@ -852,12 +892,12 @@ msgstr ""
 msgid "Permission denied, are you in the lxd group?"
 msgstr ""
 
-#: lxc/info.go:135
+#: lxc/info.go:138
 #, c-format
 msgid "Pid: %d"
 msgstr ""
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:615 lxc/storage.go:1068
+#: lxc/network.go:441 lxc/profile.go:276 lxc/storage.go:627 lxc/storage.go:1139
 msgid "Press enter to open the editor again"
 msgstr ""
 
@@ -882,7 +922,7 @@ msgstr ""
 msgid "Print verbose information"
 msgstr ""
 
-#: lxc/info.go:159
+#: lxc/info.go:162
 #, c-format
 msgid "Processes: %d"
 msgstr ""
@@ -912,7 +952,7 @@ msgstr ""
 msgid "Profile %s renamed to %s"
 msgstr ""
 
-#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:139 lxc/init.go:140
+#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:140 lxc/init.go:141
 msgid "Profile to apply to the new container"
 msgstr ""
 
@@ -921,7 +961,7 @@ msgstr ""
 msgid "Profiles %s applied to %s"
 msgstr ""
 
-#: lxc/info.go:133
+#: lxc/info.go:136
 #, c-format
 msgid "Profiles: %s"
 msgstr ""
@@ -952,11 +992,11 @@ msgstr ""
 msgid "Remote admin password"
 msgstr ""
 
-#: lxc/utils.go:366
+#: lxc/utils.go:367
 msgid "Remote operation canceled by user"
 msgstr ""
 
-#: lxc/info.go:119
+#: lxc/info.go:122
 #, c-format
 msgid "Remote: %s"
 msgstr ""
@@ -966,7 +1006,7 @@ msgstr ""
 msgid "Remove %s (yes/no): "
 msgstr ""
 
-#: lxc/storage.go:1099
+#: lxc/storage.go:1176
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr ""
@@ -975,11 +1015,11 @@ msgstr ""
 msgid "Require user confirmation"
 msgstr ""
 
-#: lxc/info.go:156
+#: lxc/info.go:159
 msgid "Resources:"
 msgstr ""
 
-#: lxc/main.go:246
+#: lxc/main.go:247
 msgid "Restart containers."
 msgstr ""
 
@@ -987,7 +1027,7 @@ msgstr ""
 msgid "Retrieve the container's console log"
 msgstr ""
 
-#: lxc/init.go:289
+#: lxc/init.go:292
 #, c-format
 msgid "Retrieving image: %s"
 msgstr ""
@@ -1000,15 +1040,15 @@ msgstr ""
 msgid "SIZE"
 msgstr ""
 
-#: lxc/list.go:471
+#: lxc/list.go:475
 msgid "SNAPSHOTS"
 msgstr ""
 
-#: lxc/storage.go:694
+#: lxc/storage.go:717
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/list.go:472
+#: lxc/cluster.go:185 lxc/list.go:476 lxc/network.go:551 lxc/storage.go:715
 msgid "STATE"
 msgstr ""
 
@@ -1020,7 +1060,7 @@ msgstr ""
 msgid "STATUS"
 msgstr ""
 
-#: lxc/list.go:474
+#: lxc/list.go:478
 msgid "STORAGE POOL"
 msgstr ""
 
@@ -1072,11 +1112,11 @@ msgstr ""
 msgid "Show the resources available to the server"
 msgstr ""
 
-#: lxc/storage.go:155
+#: lxc/storage.go:156
 msgid "Show the resources available to the storage pool"
 msgstr ""
 
-#: lxc/storage.go:156
+#: lxc/storage.go:157
 msgid "Show the used and free space in bytes"
 msgstr ""
 
@@ -1085,7 +1125,7 @@ msgstr ""
 msgid "Size: %.2fMB"
 msgstr ""
 
-#: lxc/info.go:237
+#: lxc/info.go:240
 msgid "Snapshots:"
 msgstr ""
 
@@ -1098,7 +1138,7 @@ msgstr ""
 msgid "Source:"
 msgstr ""
 
-#: lxc/main.go:256
+#: lxc/main.go:257
 msgid "Start containers."
 msgstr ""
 
@@ -1107,12 +1147,12 @@ msgstr ""
 msgid "Starting %s"
 msgstr ""
 
-#: lxc/info.go:127
+#: lxc/info.go:130
 #, c-format
 msgid "Status: %s"
 msgstr ""
 
-#: lxc/main.go:262
+#: lxc/main.go:263
 msgid "Stop containers."
 msgstr ""
 
@@ -1129,26 +1169,31 @@ msgstr ""
 msgid "Stopping the container failed: %s"
 msgstr ""
 
-#: lxc/storage.go:506
+#: lxc/storage.go:517
 #, c-format
 msgid "Storage pool %s created"
 msgstr ""
 
-#: lxc/storage.go:565
+#: lxc/storage.go:577
 #, c-format
 msgid "Storage pool %s deleted"
 msgstr ""
 
-#: lxc/init.go:145 lxc/init.go:146
+#: lxc/storage.go:515
+#, c-format
+msgid "Storage pool %s pending on node %s"
+msgstr ""
+
+#: lxc/init.go:146 lxc/init.go:147
 msgid "Storage pool name"
 msgstr ""
 
-#: lxc/storage.go:914
+#: lxc/storage.go:959
 #, c-format
 msgid "Storage volume %s created"
 msgstr ""
 
-#: lxc/storage.go:929
+#: lxc/storage.go:979
 #, c-format
 msgid "Storage volume %s deleted"
 msgstr ""
@@ -1157,15 +1202,15 @@ msgstr ""
 msgid "Store the container state (only for stop)"
 msgstr ""
 
-#: lxc/info.go:198
+#: lxc/info.go:201
 msgid "Swap (current)"
 msgstr ""
 
-#: lxc/info.go:202
+#: lxc/info.go:205
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:473 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:879
+#: lxc/list.go:477 lxc/network.go:545 lxc/operation.go:153 lxc/storage.go:914
 msgid "TYPE"
 msgstr ""
 
@@ -1179,7 +1224,7 @@ msgid ""
 "restarted."
 msgstr ""
 
-#: lxc/init.go:362
+#: lxc/init.go:365
 msgid "The container you are starting doesn't have any network attached to it."
 msgstr ""
 
@@ -1192,7 +1237,7 @@ msgstr ""
 msgid "The device doesn't exist"
 msgstr ""
 
-#: lxc/init.go:346
+#: lxc/init.go:349
 #, c-format
 msgid "The local image '%s' couldn't be found, trying '%s:' instead."
 msgstr ""
@@ -1201,11 +1246,11 @@ msgstr ""
 msgid "The opposite of \"lxc pause\" is \"lxc start\"."
 msgstr ""
 
-#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:426 lxc/storage.go:546
+#: lxc/network.go:312 lxc/network.go:365 lxc/storage.go:428 lxc/storage.go:558
 msgid "The specified device doesn't exist"
 msgstr ""
 
-#: lxc/network.go:302 lxc/network.go:355
+#: lxc/network.go:316 lxc/network.go:369
 msgid "The specified device doesn't match the network"
 msgstr ""
 
@@ -1229,11 +1274,11 @@ msgstr ""
 msgid "Timestamps:"
 msgstr ""
 
-#: lxc/init.go:364
+#: lxc/init.go:367
 msgid "To attach a network to a container, use: lxc network attach"
 msgstr ""
 
-#: lxc/init.go:363
+#: lxc/init.go:366
 msgid "To create a new network, use: lxc network create"
 msgstr ""
 
@@ -1264,11 +1309,11 @@ msgstr ""
 msgid "Try `lxc info --show-log %s` for more info"
 msgstr ""
 
-#: lxc/info.go:129
+#: lxc/info.go:132
 msgid "Type: ephemeral"
 msgstr ""
 
-#: lxc/info.go:131
+#: lxc/info.go:134
 msgid "Type: persistent"
 msgstr ""
 
@@ -1276,11 +1321,11 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/remote.go:410
+#: lxc/cluster.go:183 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:695 lxc/storage.go:882
+#: lxc/network.go:548 lxc/profile.go:574 lxc/storage.go:719 lxc/storage.go:917
 msgid "USED BY"
 msgstr ""
 
@@ -1310,6 +1355,25 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
+#: lxc/cluster.go:22
+msgid ""
+"Usage: lxc cluster <subcommand> [options]\n"
+"\n"
+"Manage cluster nodes.\n"
+"\n"
+"lxc cluster list [<remote>:]\n"
+"    List all nodes in the cluster.\n"
+"\n"
+"lxc cluster show [<remote>:]<node>\n"
+"    Show details of a node.\n"
+"\n"
+"lxc cluster rename [<remote>:]<node> <new-name>\n"
+"    Rename a cluster node.\n"
+"\n"
+"lxc cluster delete [<remote>:]<node> [--force]\n"
+"    Delete a node from the cluster."
+msgstr ""
+
 #: lxc/config.go:85
 msgid ""
 "Usage: lxc config <subcommand> [options]\n"
@@ -1626,11 +1690,12 @@ msgid ""
 "    For LXD server information."
 msgstr ""
 
-#: lxc/init.go:77
+#: lxc/init.go:78
 msgid ""
 "Usage: lxc init [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create containers from images.\n"
 "\n"
@@ -1645,7 +1710,8 @@ msgstr ""
 msgid ""
 "Usage: lxc launch [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create and start containers from images.\n"
 "\n"
@@ -1726,6 +1792,8 @@ msgid ""
 "\n"
 "\tt - Type (persistent or ephemeral)\n"
 "\n"
+"\tH - Node hosting the container\n"
+"\n"
 "Custom columns are defined with \"key[:name][:maxWidth]\":\n"
 "\n"
 "\tKEY: The (extended) config key to display\n"
@@ -1791,7 +1859,7 @@ msgid ""
 "    Rename a snapshot."
 msgstr ""
 
-#: lxc/network.go:50
+#: lxc/network.go:52
 msgid ""
 "Usage: lxc network <subcommand> [options]\n"
 "\n"
@@ -1800,13 +1868,13 @@ msgid ""
 "lxc network list [<remote>:]\n"
 "    List available networks.\n"
 "\n"
-"lxc network show [<remote>:]<network>\n"
+"lxc network show [<remote>:]<network> [--target <node>]\n"
 "    Show details of a network.\n"
 "\n"
-"lxc network create [<remote>:]<network> [key=value...]\n"
+"lxc network create [<remote>:]<network> [key=value...] [--target <node>]\n"
 "    Create a network.\n"
 "\n"
-"lxc network get [<remote>:]<network> <key>\n"
+"lxc network get [<remote>:]<network> <key> [--target <node>]\n"
 "    Get network configuration.\n"
 "\n"
 "lxc network set [<remote>:]<network> <key> <value>\n"
@@ -2039,7 +2107,7 @@ msgid ""
 "    Create a snapshot of \"u1\" called \"snap0\"."
 msgstr ""
 
-#: lxc/storage.go:65
+#: lxc/storage.go:66
 msgid ""
 "Usage: lxc storage <subcommand> [options]\n"
 "\n"
@@ -2049,13 +2117,14 @@ msgid ""
 "lxc storage list [<remote>:]\n"
 "    List available storage pools.\n"
 "\n"
-"lxc storage show [<remote>:]<pool> [--resources]\n"
+"lxc storage show [<remote>:]<pool> [--resources] [--target <node>]\n"
 "    Show details of a storage pool.\n"
 "\n"
 "lxc storage info [<remote>:]<pool> [--bytes]\n"
 "    Show information of a storage pool in yaml format.\n"
 "\n"
-"lxc storage create [<remote>:]<pool> <driver> [key=value]...\n"
+"lxc storage create [<remote>:]<pool> <driver> [key=value]... [--target "
+"<node>]\n"
 "    Create a storage pool.\n"
 "\n"
 "lxc storage get [<remote>:]<pool> <key>\n"
@@ -2078,28 +2147,31 @@ msgid ""
 "lxc storage volume list [<remote>:]<pool>\n"
 "    List available storage volumes on a storage pool.\n"
 "\n"
-"lxc storage volume show [<remote>:]<pool> <volume>\n"
-"    Show details of a storage volume on a storage pool.\n"
+"lxc storage volume show [<remote>:]<pool> <volume> [--target <node>]\n"
+"   Show details of a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume create [<remote>:]<pool> <volume> [key=value]...\n"
+"lxc storage volume create [<remote>:]<pool> <volume> [key=value]... [--"
+"target <node>]\n"
 "    Create a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume rename [<remote>:]<pool> <old name> <new name>\n"
+"lxc storage volume rename [<remote>:]<pool> <old name> <new name> [--target "
+"<node>]\n"
 "    Rename a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume get [<remote>:]<pool> <volume> <key>\n"
+"lxc storage volume get [<remote>:]<pool> <volume> <key> [--target <node>]\n"
 "    Get storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume set [<remote>:]<pool> <volume> <key> <value>\n"
+"lxc storage volume set [<remote>:]<pool> <volume> <key> <value> [--target "
+"<node>]\n"
 "    Set storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume unset [<remote>:]<pool> <volume> <key>\n"
+"lxc storage volume unset [<remote>:]<pool> <volume> <key> [--target <node>]\n"
 "    Unset storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume delete [<remote>:]<pool> <volume>\n"
+"lxc storage volume delete [<remote>:]<pool> <volume> [--target <node>]\n"
 "    Delete a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume edit [<remote>:]<pool> <volume>\n"
+"lxc storage volume edit [<remote>:]<pool> <volume> [--target <node>]\n"
 "    Edit storage volume, either by launching external editor or reading "
 "STDIN.\n"
 "\n"
@@ -2149,7 +2221,7 @@ msgstr ""
 msgid "User aborted delete operation."
 msgstr ""
 
-#: lxc/utils.go:371
+#: lxc/utils.go:372
 msgid ""
 "User signaled us three times, exiting. The remote operation will keep "
 "running."
@@ -2169,7 +2241,7 @@ msgstr ""
 msgid "Whether or not to snapshot the container's running state"
 msgstr ""
 
-#: lxc/network.go:510 lxc/operation.go:141 lxc/remote.go:382 lxc/remote.go:387
+#: lxc/network.go:528 lxc/operation.go:141 lxc/remote.go:382 lxc/remote.go:387
 msgid "YES"
 msgstr ""
 
@@ -2201,11 +2273,11 @@ msgstr ""
 msgid "default"
 msgstr ""
 
-#: lxc/storage.go:804
+#: lxc/storage.go:835
 msgid "description"
 msgstr ""
 
-#: lxc/init.go:311
+#: lxc/init.go:314
 msgid "didn't get any affected image, container or snapshot from server"
 msgstr ""
 
@@ -2213,7 +2285,7 @@ msgstr ""
 msgid "disabled"
 msgstr ""
 
-#: lxc/storage.go:803
+#: lxc/storage.go:834
 msgid "driver"
 msgstr ""
 
@@ -2231,11 +2303,11 @@ msgstr ""
 msgid "error: unknown command: %s"
 msgstr ""
 
-#: lxc/storage.go:801
+#: lxc/storage.go:832
 msgid "info"
 msgstr ""
 
-#: lxc/storage.go:802
+#: lxc/storage.go:833
 msgid "name"
 msgstr ""
 
@@ -2247,7 +2319,7 @@ msgstr ""
 msgid "ok (y/n)?"
 msgstr ""
 
-#: lxc/main.go:366 lxc/main.go:370
+#: lxc/main.go:367 lxc/main.go:371
 #, c-format
 msgid "processing aliases failed %s\n"
 msgstr ""
@@ -2276,32 +2348,32 @@ msgstr ""
 msgid "remote %s is static and cannot be modified"
 msgstr ""
 
-#: lxc/storage.go:806
+#: lxc/storage.go:837
 msgid "space used"
 msgstr ""
 
-#: lxc/info.go:248
+#: lxc/info.go:251
 msgid "stateful"
 msgstr ""
 
-#: lxc/info.go:250
+#: lxc/info.go:253
 msgid "stateless"
 msgstr ""
 
-#: lxc/info.go:244
+#: lxc/info.go:247
 #, c-format
 msgid "taken at %s"
 msgstr ""
 
-#: lxc/storage.go:805
+#: lxc/storage.go:836
 msgid "total space"
 msgstr ""
 
-#: lxc/storage.go:800
+#: lxc/storage.go:831
 msgid "used by"
 msgstr ""
 
-#: lxc/main.go:297
+#: lxc/main.go:298
 msgid "wrong number of subcommand arguments"
 msgstr ""
 
diff --git a/po/sv.po b/po/sv.po
index 686e5dc93..b51f4021a 100644
--- a/po/sv.po
+++ b/po/sv.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2018-01-21 21:48+0000\n"
+"POT-Creation-Date: 2018-01-24 13:34+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -16,7 +16,7 @@ msgstr ""
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
 
-#: lxc/storage.go:34
+#: lxc/storage.go:35
 msgid ""
 "### This is a yaml representation of a storage pool.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -33,7 +33,7 @@ msgid ""
 "###   zfs.pool_name: default"
 msgstr ""
 
-#: lxc/storage.go:51
+#: lxc/storage.go:52
 msgid ""
 "### This is a yaml representation of a storage volume.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -102,7 +102,7 @@ msgid ""
 "###  description: My custom image"
 msgstr ""
 
-#: lxc/network.go:30
+#: lxc/network.go:32
 msgid ""
 "### This is a yaml representation of the network.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -153,7 +153,7 @@ msgstr ""
 msgid "%s is not a directory"
 msgstr ""
 
-#: lxc/utils.go:375
+#: lxc/utils.go:376
 #, c-format
 msgid "%v (interrupt two more times to force)"
 msgstr ""
@@ -179,7 +179,7 @@ msgstr ""
 msgid "ARCH"
 msgstr ""
 
-#: lxc/list.go:463
+#: lxc/list.go:467
 msgid "ARCHITECTURE"
 msgstr ""
 
@@ -204,7 +204,7 @@ msgstr ""
 msgid "Aliases:"
 msgstr ""
 
-#: lxc/image.go:600 lxc/info.go:122
+#: lxc/image.go:600 lxc/info.go:125
 #, c-format
 msgid "Architecture: %s"
 msgstr ""
@@ -228,11 +228,11 @@ msgstr ""
 msgid "Both --all and container name given"
 msgstr ""
 
-#: lxc/info.go:215
+#: lxc/info.go:218
 msgid "Bytes received"
 msgstr ""
 
-#: lxc/info.go:216
+#: lxc/info.go:219
 msgid "Bytes sent"
 msgstr ""
 
@@ -244,11 +244,11 @@ msgstr ""
 msgid "COMMON NAME"
 msgstr ""
 
-#: lxc/info.go:179
+#: lxc/info.go:182
 msgid "CPU usage (in seconds)"
 msgstr ""
 
-#: lxc/info.go:183
+#: lxc/info.go:186
 msgid "CPU usage:"
 msgstr ""
 
@@ -256,7 +256,7 @@ msgstr ""
 msgid "CREATED"
 msgstr ""
 
-#: lxc/list.go:464
+#: lxc/list.go:468
 msgid "CREATED AT"
 msgstr ""
 
@@ -269,7 +269,7 @@ msgstr ""
 msgid "Can't pull a directory without --recursive"
 msgstr ""
 
-#: lxc/config.go:207 lxc/network.go:560
+#: lxc/config.go:207 lxc/network.go:587
 #, c-format
 msgid "Can't read from stdin: %s"
 msgstr ""
@@ -288,7 +288,7 @@ msgstr ""
 msgid "Can't unset key '%s', it's not currently set."
 msgstr ""
 
-#: lxc/profile.go:546 lxc/storage.go:663
+#: lxc/profile.go:546 lxc/storage.go:675
 msgid "Cannot provide container name to list"
 msgstr ""
 
@@ -301,7 +301,7 @@ msgstr ""
 msgid "Client certificate stored at server: "
 msgstr ""
 
-#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:130 lxc/list.go:131
+#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:134 lxc/list.go:135
 msgid "Columns"
 msgstr ""
 
@@ -309,12 +309,12 @@ msgstr ""
 msgid "Commands:"
 msgstr ""
 
-#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:137 lxc/init.go:138
+#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:138 lxc/init.go:139
 msgid "Config key/value to apply to the new container"
 msgstr ""
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:614 lxc/storage.go:1067
+#: lxc/network.go:440 lxc/profile.go:275 lxc/storage.go:626 lxc/storage.go:1138
 #, c-format
 msgid "Config parsing error: %s"
 msgstr ""
@@ -331,7 +331,7 @@ msgstr ""
 msgid "Container name is mandatory"
 msgstr ""
 
-#: lxc/copy.go:229 lxc/init.go:317
+#: lxc/copy.go:229 lxc/init.go:320
 #, c-format
 msgid "Container name is: %s"
 msgstr ""
@@ -366,26 +366,30 @@ msgstr ""
 msgid "Create any directories necessary"
 msgstr ""
 
-#: lxc/image.go:605 lxc/info.go:124
+#: lxc/image.go:605 lxc/info.go:127
 #, c-format
 msgid "Created: %s"
 msgstr ""
 
-#: lxc/init.go:196
+#: lxc/init.go:199
 #, c-format
 msgid "Creating %s"
 msgstr ""
 
-#: lxc/init.go:194
+#: lxc/init.go:197
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:465 lxc/network.go:525
-#: lxc/storage.go:692 lxc/storage.go:881
+#: lxc/cluster.go:184
+msgid "DATABASE"
+msgstr ""
+
+#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:469 lxc/network.go:547
+#: lxc/storage.go:711 lxc/storage.go:916
 msgid "DESCRIPTION"
 msgstr ""
 
-#: lxc/storage.go:693
+#: lxc/storage.go:712
 msgid "DRIVER"
 msgstr ""
 
@@ -403,7 +407,7 @@ msgstr ""
 msgid "Device %s removed from %s"
 msgstr ""
 
-#: lxc/utils.go:307 lxc/utils.go:331
+#: lxc/utils.go:308 lxc/utils.go:332
 #, c-format
 msgid "Device already exists: %s"
 msgstr ""
@@ -420,11 +424,11 @@ msgstr ""
 msgid "Disable stdin (reads from /dev/null)"
 msgstr ""
 
-#: lxc/info.go:172
+#: lxc/info.go:175
 msgid "Disk usage:"
 msgstr ""
 
-#: lxc/list.go:622
+#: lxc/list.go:638
 msgid "EPHEMERAL"
 msgstr ""
 
@@ -448,7 +452,7 @@ msgstr ""
 msgid "Environment:"
 msgstr ""
 
-#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:141 lxc/init.go:142
+#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:142 lxc/init.go:143
 msgid "Ephemeral container"
 msgstr ""
 
@@ -483,7 +487,7 @@ msgstr ""
 msgid "FINGERPRINT"
 msgstr ""
 
-#: lxc/utils.go:413
+#: lxc/utils.go:414
 #, c-format
 msgid "Failed to create alias %s"
 msgstr ""
@@ -502,7 +506,7 @@ msgstr ""
 msgid "Failed to get the new container name"
 msgstr ""
 
-#: lxc/utils.go:403
+#: lxc/utils.go:404
 #, c-format
 msgid "Failed to remove alias %s"
 msgstr ""
@@ -512,11 +516,11 @@ msgstr ""
 msgid "Failed to walk path for %s: %s"
 msgstr ""
 
-#: lxc/list.go:133
+#: lxc/list.go:137
 msgid "Fast mode (same as --columns=nsacPt)"
 msgstr ""
 
-#: lxc/network.go:486 lxc/operation.go:121
+#: lxc/network.go:504 lxc/operation.go:121
 msgid "Filtering isn't supported yet"
 msgstr ""
 
@@ -529,6 +533,10 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
+#: lxc/cluster.go:41
+msgid "Force removing a node, even if degraded"
+msgstr ""
+
 #: lxc/action.go:47 lxc/action.go:48
 msgid "Force the container to shutdown"
 msgstr ""
@@ -541,7 +549,7 @@ msgstr ""
 msgid "Force using the local unix socket"
 msgstr ""
 
-#: lxc/image.go:180 lxc/list.go:132
+#: lxc/image.go:180 lxc/list.go:136
 msgid "Format (csv|json|table|yaml)"
 msgstr ""
 
@@ -553,11 +561,11 @@ msgstr ""
 msgid "ID"
 msgstr ""
 
-#: lxc/list.go:461
+#: lxc/list.go:465
 msgid "IPV4"
 msgstr ""
 
-#: lxc/list.go:462
+#: lxc/list.go:466
 msgid "IPV6"
 msgstr ""
 
@@ -604,7 +612,7 @@ msgstr ""
 msgid "Input data"
 msgstr ""
 
-#: lxc/init.go:147
+#: lxc/init.go:148
 msgid "Instance type"
 msgstr ""
 
@@ -641,7 +649,7 @@ msgstr ""
 msgid "Invalid target %s"
 msgstr ""
 
-#: lxc/info.go:153
+#: lxc/info.go:156
 msgid "Ips:"
 msgstr ""
 
@@ -649,7 +657,7 @@ msgstr ""
 msgid "Keep the image up to date after initial copy"
 msgstr ""
 
-#: lxc/list.go:466
+#: lxc/list.go:470
 msgid "LAST USED AT"
 msgstr ""
 
@@ -666,14 +674,18 @@ msgstr ""
 msgid "Last used: never"
 msgstr ""
 
-#: lxc/info.go:268
+#: lxc/info.go:271
 msgid "Log:"
 msgstr ""
 
-#: lxc/network.go:524
+#: lxc/network.go:546
 msgid "MANAGED"
 msgstr ""
 
+#: lxc/cluster.go:186
+msgid "MESSAGE"
+msgstr ""
+
 #: lxc/image.go:176
 msgid "Make image public"
 msgstr ""
@@ -682,23 +694,23 @@ msgstr ""
 msgid "Make the image public"
 msgstr ""
 
-#: lxc/info.go:190
+#: lxc/info.go:193
 msgid "Memory (current)"
 msgstr ""
 
-#: lxc/info.go:194
+#: lxc/info.go:197
 msgid "Memory (peak)"
 msgstr ""
 
-#: lxc/info.go:206
+#: lxc/info.go:209
 msgid "Memory usage:"
 msgstr ""
 
-#: lxc/utils.go:258
+#: lxc/utils.go:259
 msgid "Missing summary."
 msgstr ""
 
-#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:412 lxc/storage.go:532
+#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:414 lxc/storage.go:544
 msgid "More than one device matches, specify the device name."
 msgstr ""
 
@@ -718,40 +730,49 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/list.go:467 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409
-#: lxc/storage.go:691 lxc/storage.go:880
+#: lxc/cluster.go:182 lxc/list.go:471 lxc/network.go:544 lxc/profile.go:573
+#: lxc/remote.go:409 lxc/storage.go:710 lxc/storage.go:915
 msgid "NAME"
 msgstr ""
 
-#: lxc/network.go:508 lxc/operation.go:139 lxc/remote.go:380 lxc/remote.go:385
+#: lxc/network.go:526 lxc/operation.go:139 lxc/remote.go:380 lxc/remote.go:385
 msgid "NO"
 msgstr ""
 
+#: lxc/list.go:492 lxc/storage.go:920
+msgid "NODE"
+msgstr ""
+
 #: lxc/info.go:117
 #, c-format
 msgid "Name: %s"
 msgstr ""
 
-#: lxc/network.go:258
+#: lxc/network.go:271
 #, c-format
 msgid "Network %s created"
 msgstr ""
 
-#: lxc/network.go:374
+#: lxc/network.go:388
 #, c-format
 msgid "Network %s deleted"
 msgstr ""
 
-#: lxc/network.go:451
+#: lxc/network.go:269
+#, c-format
+msgid "Network %s pending on node %s"
+msgstr ""
+
+#: lxc/network.go:465
 #, c-format
 msgid "Network %s renamed to %s"
 msgstr ""
 
-#: lxc/init.go:143 lxc/init.go:144
+#: lxc/init.go:144 lxc/init.go:145
 msgid "Network name"
 msgstr ""
 
-#: lxc/info.go:223
+#: lxc/info.go:226
 msgid "Network usage:"
 msgstr ""
 
@@ -763,11 +784,11 @@ msgstr ""
 msgid "No certificate provided to add"
 msgstr ""
 
-#: lxc/network.go:293 lxc/network.go:346
+#: lxc/network.go:307 lxc/network.go:360
 msgid "No device found for this network"
 msgstr ""
 
-#: lxc/storage.go:421 lxc/storage.go:541
+#: lxc/storage.go:423 lxc/storage.go:553
 msgid "No device found for this storage volume."
 msgstr ""
 
@@ -775,7 +796,26 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/storage.go:365 lxc/storage.go:458
+#: lxc/cluster.go:142
+#, c-format
+msgid "Node %s removed"
+msgstr ""
+
+#: lxc/cluster.go:118
+#, c-format
+msgid "Node %s renamed to %s"
+msgstr ""
+
+#: lxc/init.go:149 lxc/network.go:102 lxc/storage.go:158
+msgid "Node name"
+msgstr ""
+
+#: lxc/info.go:119
+#, c-format
+msgid "Node: %s"
+msgstr ""
+
+#: lxc/storage.go:367 lxc/storage.go:460
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
 
@@ -787,7 +827,7 @@ msgstr ""
 msgid "Only https:// is supported for remote image import."
 msgstr ""
 
-#: lxc/network.go:402 lxc/network.go:546
+#: lxc/network.go:416 lxc/network.go:573
 msgid "Only managed networks can be modified."
 msgstr ""
 
@@ -804,19 +844,19 @@ msgstr ""
 msgid "Override the terminal mode (auto, interactive or non-interactive)"
 msgstr ""
 
-#: lxc/list.go:624
+#: lxc/list.go:640
 msgid "PERSISTENT"
 msgstr ""
 
-#: lxc/list.go:469
+#: lxc/list.go:473
 msgid "PID"
 msgstr ""
 
-#: lxc/list.go:468
+#: lxc/list.go:472
 msgid "PROCESSES"
 msgstr ""
 
-#: lxc/list.go:470
+#: lxc/list.go:474
 msgid "PROFILES"
 msgstr ""
 
@@ -828,11 +868,11 @@ msgstr ""
 msgid "PUBLIC"
 msgstr ""
 
-#: lxc/info.go:217
+#: lxc/info.go:220
 msgid "Packets received"
 msgstr ""
 
-#: lxc/info.go:218
+#: lxc/info.go:221
 msgid "Packets sent"
 msgstr ""
 
@@ -844,7 +884,7 @@ msgstr ""
 msgid "Path to an alternate server directory"
 msgstr ""
 
-#: lxc/main.go:238
+#: lxc/main.go:239
 msgid "Pause containers."
 msgstr ""
 
@@ -852,12 +892,12 @@ msgstr ""
 msgid "Permission denied, are you in the lxd group?"
 msgstr ""
 
-#: lxc/info.go:135
+#: lxc/info.go:138
 #, c-format
 msgid "Pid: %d"
 msgstr ""
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:615 lxc/storage.go:1068
+#: lxc/network.go:441 lxc/profile.go:276 lxc/storage.go:627 lxc/storage.go:1139
 msgid "Press enter to open the editor again"
 msgstr ""
 
@@ -882,7 +922,7 @@ msgstr ""
 msgid "Print verbose information"
 msgstr ""
 
-#: lxc/info.go:159
+#: lxc/info.go:162
 #, c-format
 msgid "Processes: %d"
 msgstr ""
@@ -912,7 +952,7 @@ msgstr ""
 msgid "Profile %s renamed to %s"
 msgstr ""
 
-#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:139 lxc/init.go:140
+#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:140 lxc/init.go:141
 msgid "Profile to apply to the new container"
 msgstr ""
 
@@ -921,7 +961,7 @@ msgstr ""
 msgid "Profiles %s applied to %s"
 msgstr ""
 
-#: lxc/info.go:133
+#: lxc/info.go:136
 #, c-format
 msgid "Profiles: %s"
 msgstr ""
@@ -952,11 +992,11 @@ msgstr ""
 msgid "Remote admin password"
 msgstr ""
 
-#: lxc/utils.go:366
+#: lxc/utils.go:367
 msgid "Remote operation canceled by user"
 msgstr ""
 
-#: lxc/info.go:119
+#: lxc/info.go:122
 #, c-format
 msgid "Remote: %s"
 msgstr ""
@@ -966,7 +1006,7 @@ msgstr ""
 msgid "Remove %s (yes/no): "
 msgstr ""
 
-#: lxc/storage.go:1099
+#: lxc/storage.go:1176
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr ""
@@ -975,11 +1015,11 @@ msgstr ""
 msgid "Require user confirmation"
 msgstr ""
 
-#: lxc/info.go:156
+#: lxc/info.go:159
 msgid "Resources:"
 msgstr ""
 
-#: lxc/main.go:246
+#: lxc/main.go:247
 msgid "Restart containers."
 msgstr ""
 
@@ -987,7 +1027,7 @@ msgstr ""
 msgid "Retrieve the container's console log"
 msgstr ""
 
-#: lxc/init.go:289
+#: lxc/init.go:292
 #, c-format
 msgid "Retrieving image: %s"
 msgstr ""
@@ -1000,15 +1040,15 @@ msgstr ""
 msgid "SIZE"
 msgstr ""
 
-#: lxc/list.go:471
+#: lxc/list.go:475
 msgid "SNAPSHOTS"
 msgstr ""
 
-#: lxc/storage.go:694
+#: lxc/storage.go:717
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/list.go:472
+#: lxc/cluster.go:185 lxc/list.go:476 lxc/network.go:551 lxc/storage.go:715
 msgid "STATE"
 msgstr ""
 
@@ -1020,7 +1060,7 @@ msgstr ""
 msgid "STATUS"
 msgstr ""
 
-#: lxc/list.go:474
+#: lxc/list.go:478
 msgid "STORAGE POOL"
 msgstr ""
 
@@ -1072,11 +1112,11 @@ msgstr ""
 msgid "Show the resources available to the server"
 msgstr ""
 
-#: lxc/storage.go:155
+#: lxc/storage.go:156
 msgid "Show the resources available to the storage pool"
 msgstr ""
 
-#: lxc/storage.go:156
+#: lxc/storage.go:157
 msgid "Show the used and free space in bytes"
 msgstr ""
 
@@ -1085,7 +1125,7 @@ msgstr ""
 msgid "Size: %.2fMB"
 msgstr ""
 
-#: lxc/info.go:237
+#: lxc/info.go:240
 msgid "Snapshots:"
 msgstr ""
 
@@ -1098,7 +1138,7 @@ msgstr ""
 msgid "Source:"
 msgstr ""
 
-#: lxc/main.go:256
+#: lxc/main.go:257
 msgid "Start containers."
 msgstr ""
 
@@ -1107,12 +1147,12 @@ msgstr ""
 msgid "Starting %s"
 msgstr ""
 
-#: lxc/info.go:127
+#: lxc/info.go:130
 #, c-format
 msgid "Status: %s"
 msgstr ""
 
-#: lxc/main.go:262
+#: lxc/main.go:263
 msgid "Stop containers."
 msgstr ""
 
@@ -1129,26 +1169,31 @@ msgstr ""
 msgid "Stopping the container failed: %s"
 msgstr ""
 
-#: lxc/storage.go:506
+#: lxc/storage.go:517
 #, c-format
 msgid "Storage pool %s created"
 msgstr ""
 
-#: lxc/storage.go:565
+#: lxc/storage.go:577
 #, c-format
 msgid "Storage pool %s deleted"
 msgstr ""
 
-#: lxc/init.go:145 lxc/init.go:146
+#: lxc/storage.go:515
+#, c-format
+msgid "Storage pool %s pending on node %s"
+msgstr ""
+
+#: lxc/init.go:146 lxc/init.go:147
 msgid "Storage pool name"
 msgstr ""
 
-#: lxc/storage.go:914
+#: lxc/storage.go:959
 #, c-format
 msgid "Storage volume %s created"
 msgstr ""
 
-#: lxc/storage.go:929
+#: lxc/storage.go:979
 #, c-format
 msgid "Storage volume %s deleted"
 msgstr ""
@@ -1157,15 +1202,15 @@ msgstr ""
 msgid "Store the container state (only for stop)"
 msgstr ""
 
-#: lxc/info.go:198
+#: lxc/info.go:201
 msgid "Swap (current)"
 msgstr ""
 
-#: lxc/info.go:202
+#: lxc/info.go:205
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:473 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:879
+#: lxc/list.go:477 lxc/network.go:545 lxc/operation.go:153 lxc/storage.go:914
 msgid "TYPE"
 msgstr ""
 
@@ -1179,7 +1224,7 @@ msgid ""
 "restarted."
 msgstr ""
 
-#: lxc/init.go:362
+#: lxc/init.go:365
 msgid "The container you are starting doesn't have any network attached to it."
 msgstr ""
 
@@ -1192,7 +1237,7 @@ msgstr ""
 msgid "The device doesn't exist"
 msgstr ""
 
-#: lxc/init.go:346
+#: lxc/init.go:349
 #, c-format
 msgid "The local image '%s' couldn't be found, trying '%s:' instead."
 msgstr ""
@@ -1201,11 +1246,11 @@ msgstr ""
 msgid "The opposite of \"lxc pause\" is \"lxc start\"."
 msgstr ""
 
-#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:426 lxc/storage.go:546
+#: lxc/network.go:312 lxc/network.go:365 lxc/storage.go:428 lxc/storage.go:558
 msgid "The specified device doesn't exist"
 msgstr ""
 
-#: lxc/network.go:302 lxc/network.go:355
+#: lxc/network.go:316 lxc/network.go:369
 msgid "The specified device doesn't match the network"
 msgstr ""
 
@@ -1229,11 +1274,11 @@ msgstr ""
 msgid "Timestamps:"
 msgstr ""
 
-#: lxc/init.go:364
+#: lxc/init.go:367
 msgid "To attach a network to a container, use: lxc network attach"
 msgstr ""
 
-#: lxc/init.go:363
+#: lxc/init.go:366
 msgid "To create a new network, use: lxc network create"
 msgstr ""
 
@@ -1264,11 +1309,11 @@ msgstr ""
 msgid "Try `lxc info --show-log %s` for more info"
 msgstr ""
 
-#: lxc/info.go:129
+#: lxc/info.go:132
 msgid "Type: ephemeral"
 msgstr ""
 
-#: lxc/info.go:131
+#: lxc/info.go:134
 msgid "Type: persistent"
 msgstr ""
 
@@ -1276,11 +1321,11 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/remote.go:410
+#: lxc/cluster.go:183 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:695 lxc/storage.go:882
+#: lxc/network.go:548 lxc/profile.go:574 lxc/storage.go:719 lxc/storage.go:917
 msgid "USED BY"
 msgstr ""
 
@@ -1310,6 +1355,25 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
+#: lxc/cluster.go:22
+msgid ""
+"Usage: lxc cluster <subcommand> [options]\n"
+"\n"
+"Manage cluster nodes.\n"
+"\n"
+"lxc cluster list [<remote>:]\n"
+"    List all nodes in the cluster.\n"
+"\n"
+"lxc cluster show [<remote>:]<node>\n"
+"    Show details of a node.\n"
+"\n"
+"lxc cluster rename [<remote>:]<node> <new-name>\n"
+"    Rename a cluster node.\n"
+"\n"
+"lxc cluster delete [<remote>:]<node> [--force]\n"
+"    Delete a node from the cluster."
+msgstr ""
+
 #: lxc/config.go:85
 msgid ""
 "Usage: lxc config <subcommand> [options]\n"
@@ -1626,11 +1690,12 @@ msgid ""
 "    For LXD server information."
 msgstr ""
 
-#: lxc/init.go:77
+#: lxc/init.go:78
 msgid ""
 "Usage: lxc init [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create containers from images.\n"
 "\n"
@@ -1645,7 +1710,8 @@ msgstr ""
 msgid ""
 "Usage: lxc launch [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create and start containers from images.\n"
 "\n"
@@ -1726,6 +1792,8 @@ msgid ""
 "\n"
 "\tt - Type (persistent or ephemeral)\n"
 "\n"
+"\tH - Node hosting the container\n"
+"\n"
 "Custom columns are defined with \"key[:name][:maxWidth]\":\n"
 "\n"
 "\tKEY: The (extended) config key to display\n"
@@ -1791,7 +1859,7 @@ msgid ""
 "    Rename a snapshot."
 msgstr ""
 
-#: lxc/network.go:50
+#: lxc/network.go:52
 msgid ""
 "Usage: lxc network <subcommand> [options]\n"
 "\n"
@@ -1800,13 +1868,13 @@ msgid ""
 "lxc network list [<remote>:]\n"
 "    List available networks.\n"
 "\n"
-"lxc network show [<remote>:]<network>\n"
+"lxc network show [<remote>:]<network> [--target <node>]\n"
 "    Show details of a network.\n"
 "\n"
-"lxc network create [<remote>:]<network> [key=value...]\n"
+"lxc network create [<remote>:]<network> [key=value...] [--target <node>]\n"
 "    Create a network.\n"
 "\n"
-"lxc network get [<remote>:]<network> <key>\n"
+"lxc network get [<remote>:]<network> <key> [--target <node>]\n"
 "    Get network configuration.\n"
 "\n"
 "lxc network set [<remote>:]<network> <key> <value>\n"
@@ -2039,7 +2107,7 @@ msgid ""
 "    Create a snapshot of \"u1\" called \"snap0\"."
 msgstr ""
 
-#: lxc/storage.go:65
+#: lxc/storage.go:66
 msgid ""
 "Usage: lxc storage <subcommand> [options]\n"
 "\n"
@@ -2049,13 +2117,14 @@ msgid ""
 "lxc storage list [<remote>:]\n"
 "    List available storage pools.\n"
 "\n"
-"lxc storage show [<remote>:]<pool> [--resources]\n"
+"lxc storage show [<remote>:]<pool> [--resources] [--target <node>]\n"
 "    Show details of a storage pool.\n"
 "\n"
 "lxc storage info [<remote>:]<pool> [--bytes]\n"
 "    Show information of a storage pool in yaml format.\n"
 "\n"
-"lxc storage create [<remote>:]<pool> <driver> [key=value]...\n"
+"lxc storage create [<remote>:]<pool> <driver> [key=value]... [--target "
+"<node>]\n"
 "    Create a storage pool.\n"
 "\n"
 "lxc storage get [<remote>:]<pool> <key>\n"
@@ -2078,28 +2147,31 @@ msgid ""
 "lxc storage volume list [<remote>:]<pool>\n"
 "    List available storage volumes on a storage pool.\n"
 "\n"
-"lxc storage volume show [<remote>:]<pool> <volume>\n"
-"    Show details of a storage volume on a storage pool.\n"
+"lxc storage volume show [<remote>:]<pool> <volume> [--target <node>]\n"
+"   Show details of a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume create [<remote>:]<pool> <volume> [key=value]...\n"
+"lxc storage volume create [<remote>:]<pool> <volume> [key=value]... [--"
+"target <node>]\n"
 "    Create a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume rename [<remote>:]<pool> <old name> <new name>\n"
+"lxc storage volume rename [<remote>:]<pool> <old name> <new name> [--target "
+"<node>]\n"
 "    Rename a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume get [<remote>:]<pool> <volume> <key>\n"
+"lxc storage volume get [<remote>:]<pool> <volume> <key> [--target <node>]\n"
 "    Get storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume set [<remote>:]<pool> <volume> <key> <value>\n"
+"lxc storage volume set [<remote>:]<pool> <volume> <key> <value> [--target "
+"<node>]\n"
 "    Set storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume unset [<remote>:]<pool> <volume> <key>\n"
+"lxc storage volume unset [<remote>:]<pool> <volume> <key> [--target <node>]\n"
 "    Unset storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume delete [<remote>:]<pool> <volume>\n"
+"lxc storage volume delete [<remote>:]<pool> <volume> [--target <node>]\n"
 "    Delete a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume edit [<remote>:]<pool> <volume>\n"
+"lxc storage volume edit [<remote>:]<pool> <volume> [--target <node>]\n"
 "    Edit storage volume, either by launching external editor or reading "
 "STDIN.\n"
 "\n"
@@ -2149,7 +2221,7 @@ msgstr ""
 msgid "User aborted delete operation."
 msgstr ""
 
-#: lxc/utils.go:371
+#: lxc/utils.go:372
 msgid ""
 "User signaled us three times, exiting. The remote operation will keep "
 "running."
@@ -2169,7 +2241,7 @@ msgstr ""
 msgid "Whether or not to snapshot the container's running state"
 msgstr ""
 
-#: lxc/network.go:510 lxc/operation.go:141 lxc/remote.go:382 lxc/remote.go:387
+#: lxc/network.go:528 lxc/operation.go:141 lxc/remote.go:382 lxc/remote.go:387
 msgid "YES"
 msgstr ""
 
@@ -2201,11 +2273,11 @@ msgstr ""
 msgid "default"
 msgstr ""
 
-#: lxc/storage.go:804
+#: lxc/storage.go:835
 msgid "description"
 msgstr ""
 
-#: lxc/init.go:311
+#: lxc/init.go:314
 msgid "didn't get any affected image, container or snapshot from server"
 msgstr ""
 
@@ -2213,7 +2285,7 @@ msgstr ""
 msgid "disabled"
 msgstr ""
 
-#: lxc/storage.go:803
+#: lxc/storage.go:834
 msgid "driver"
 msgstr ""
 
@@ -2231,11 +2303,11 @@ msgstr ""
 msgid "error: unknown command: %s"
 msgstr ""
 
-#: lxc/storage.go:801
+#: lxc/storage.go:832
 msgid "info"
 msgstr ""
 
-#: lxc/storage.go:802
+#: lxc/storage.go:833
 msgid "name"
 msgstr ""
 
@@ -2247,7 +2319,7 @@ msgstr ""
 msgid "ok (y/n)?"
 msgstr ""
 
-#: lxc/main.go:366 lxc/main.go:370
+#: lxc/main.go:367 lxc/main.go:371
 #, c-format
 msgid "processing aliases failed %s\n"
 msgstr ""
@@ -2276,32 +2348,32 @@ msgstr ""
 msgid "remote %s is static and cannot be modified"
 msgstr ""
 
-#: lxc/storage.go:806
+#: lxc/storage.go:837
 msgid "space used"
 msgstr ""
 
-#: lxc/info.go:248
+#: lxc/info.go:251
 msgid "stateful"
 msgstr ""
 
-#: lxc/info.go:250
+#: lxc/info.go:253
 msgid "stateless"
 msgstr ""
 
-#: lxc/info.go:244
+#: lxc/info.go:247
 #, c-format
 msgid "taken at %s"
 msgstr ""
 
-#: lxc/storage.go:805
+#: lxc/storage.go:836
 msgid "total space"
 msgstr ""
 
-#: lxc/storage.go:800
+#: lxc/storage.go:831
 msgid "used by"
 msgstr ""
 
-#: lxc/main.go:297
+#: lxc/main.go:298
 msgid "wrong number of subcommand arguments"
 msgstr ""
 
diff --git a/po/tr.po b/po/tr.po
index cfd21eb4a..9600e57d6 100644
--- a/po/tr.po
+++ b/po/tr.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2018-01-21 21:48+0000\n"
+"POT-Creation-Date: 2018-01-24 13:34+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -16,7 +16,7 @@ msgstr ""
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
 
-#: lxc/storage.go:34
+#: lxc/storage.go:35
 msgid ""
 "### This is a yaml representation of a storage pool.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -33,7 +33,7 @@ msgid ""
 "###   zfs.pool_name: default"
 msgstr ""
 
-#: lxc/storage.go:51
+#: lxc/storage.go:52
 msgid ""
 "### This is a yaml representation of a storage volume.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -102,7 +102,7 @@ msgid ""
 "###  description: My custom image"
 msgstr ""
 
-#: lxc/network.go:30
+#: lxc/network.go:32
 msgid ""
 "### This is a yaml representation of the network.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -153,7 +153,7 @@ msgstr ""
 msgid "%s is not a directory"
 msgstr ""
 
-#: lxc/utils.go:375
+#: lxc/utils.go:376
 #, c-format
 msgid "%v (interrupt two more times to force)"
 msgstr ""
@@ -179,7 +179,7 @@ msgstr ""
 msgid "ARCH"
 msgstr ""
 
-#: lxc/list.go:463
+#: lxc/list.go:467
 msgid "ARCHITECTURE"
 msgstr ""
 
@@ -204,7 +204,7 @@ msgstr ""
 msgid "Aliases:"
 msgstr ""
 
-#: lxc/image.go:600 lxc/info.go:122
+#: lxc/image.go:600 lxc/info.go:125
 #, c-format
 msgid "Architecture: %s"
 msgstr ""
@@ -228,11 +228,11 @@ msgstr ""
 msgid "Both --all and container name given"
 msgstr ""
 
-#: lxc/info.go:215
+#: lxc/info.go:218
 msgid "Bytes received"
 msgstr ""
 
-#: lxc/info.go:216
+#: lxc/info.go:219
 msgid "Bytes sent"
 msgstr ""
 
@@ -244,11 +244,11 @@ msgstr ""
 msgid "COMMON NAME"
 msgstr ""
 
-#: lxc/info.go:179
+#: lxc/info.go:182
 msgid "CPU usage (in seconds)"
 msgstr ""
 
-#: lxc/info.go:183
+#: lxc/info.go:186
 msgid "CPU usage:"
 msgstr ""
 
@@ -256,7 +256,7 @@ msgstr ""
 msgid "CREATED"
 msgstr ""
 
-#: lxc/list.go:464
+#: lxc/list.go:468
 msgid "CREATED AT"
 msgstr ""
 
@@ -269,7 +269,7 @@ msgstr ""
 msgid "Can't pull a directory without --recursive"
 msgstr ""
 
-#: lxc/config.go:207 lxc/network.go:560
+#: lxc/config.go:207 lxc/network.go:587
 #, c-format
 msgid "Can't read from stdin: %s"
 msgstr ""
@@ -288,7 +288,7 @@ msgstr ""
 msgid "Can't unset key '%s', it's not currently set."
 msgstr ""
 
-#: lxc/profile.go:546 lxc/storage.go:663
+#: lxc/profile.go:546 lxc/storage.go:675
 msgid "Cannot provide container name to list"
 msgstr ""
 
@@ -301,7 +301,7 @@ msgstr ""
 msgid "Client certificate stored at server: "
 msgstr ""
 
-#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:130 lxc/list.go:131
+#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:134 lxc/list.go:135
 msgid "Columns"
 msgstr ""
 
@@ -309,12 +309,12 @@ msgstr ""
 msgid "Commands:"
 msgstr ""
 
-#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:137 lxc/init.go:138
+#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:138 lxc/init.go:139
 msgid "Config key/value to apply to the new container"
 msgstr ""
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:614 lxc/storage.go:1067
+#: lxc/network.go:440 lxc/profile.go:275 lxc/storage.go:626 lxc/storage.go:1138
 #, c-format
 msgid "Config parsing error: %s"
 msgstr ""
@@ -331,7 +331,7 @@ msgstr ""
 msgid "Container name is mandatory"
 msgstr ""
 
-#: lxc/copy.go:229 lxc/init.go:317
+#: lxc/copy.go:229 lxc/init.go:320
 #, c-format
 msgid "Container name is: %s"
 msgstr ""
@@ -366,26 +366,30 @@ msgstr ""
 msgid "Create any directories necessary"
 msgstr ""
 
-#: lxc/image.go:605 lxc/info.go:124
+#: lxc/image.go:605 lxc/info.go:127
 #, c-format
 msgid "Created: %s"
 msgstr ""
 
-#: lxc/init.go:196
+#: lxc/init.go:199
 #, c-format
 msgid "Creating %s"
 msgstr ""
 
-#: lxc/init.go:194
+#: lxc/init.go:197
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:465 lxc/network.go:525
-#: lxc/storage.go:692 lxc/storage.go:881
+#: lxc/cluster.go:184
+msgid "DATABASE"
+msgstr ""
+
+#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:469 lxc/network.go:547
+#: lxc/storage.go:711 lxc/storage.go:916
 msgid "DESCRIPTION"
 msgstr ""
 
-#: lxc/storage.go:693
+#: lxc/storage.go:712
 msgid "DRIVER"
 msgstr ""
 
@@ -403,7 +407,7 @@ msgstr ""
 msgid "Device %s removed from %s"
 msgstr ""
 
-#: lxc/utils.go:307 lxc/utils.go:331
+#: lxc/utils.go:308 lxc/utils.go:332
 #, c-format
 msgid "Device already exists: %s"
 msgstr ""
@@ -420,11 +424,11 @@ msgstr ""
 msgid "Disable stdin (reads from /dev/null)"
 msgstr ""
 
-#: lxc/info.go:172
+#: lxc/info.go:175
 msgid "Disk usage:"
 msgstr ""
 
-#: lxc/list.go:622
+#: lxc/list.go:638
 msgid "EPHEMERAL"
 msgstr ""
 
@@ -448,7 +452,7 @@ msgstr ""
 msgid "Environment:"
 msgstr ""
 
-#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:141 lxc/init.go:142
+#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:142 lxc/init.go:143
 msgid "Ephemeral container"
 msgstr ""
 
@@ -483,7 +487,7 @@ msgstr ""
 msgid "FINGERPRINT"
 msgstr ""
 
-#: lxc/utils.go:413
+#: lxc/utils.go:414
 #, c-format
 msgid "Failed to create alias %s"
 msgstr ""
@@ -502,7 +506,7 @@ msgstr ""
 msgid "Failed to get the new container name"
 msgstr ""
 
-#: lxc/utils.go:403
+#: lxc/utils.go:404
 #, c-format
 msgid "Failed to remove alias %s"
 msgstr ""
@@ -512,11 +516,11 @@ msgstr ""
 msgid "Failed to walk path for %s: %s"
 msgstr ""
 
-#: lxc/list.go:133
+#: lxc/list.go:137
 msgid "Fast mode (same as --columns=nsacPt)"
 msgstr ""
 
-#: lxc/network.go:486 lxc/operation.go:121
+#: lxc/network.go:504 lxc/operation.go:121
 msgid "Filtering isn't supported yet"
 msgstr ""
 
@@ -529,6 +533,10 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
+#: lxc/cluster.go:41
+msgid "Force removing a node, even if degraded"
+msgstr ""
+
 #: lxc/action.go:47 lxc/action.go:48
 msgid "Force the container to shutdown"
 msgstr ""
@@ -541,7 +549,7 @@ msgstr ""
 msgid "Force using the local unix socket"
 msgstr ""
 
-#: lxc/image.go:180 lxc/list.go:132
+#: lxc/image.go:180 lxc/list.go:136
 msgid "Format (csv|json|table|yaml)"
 msgstr ""
 
@@ -553,11 +561,11 @@ msgstr ""
 msgid "ID"
 msgstr ""
 
-#: lxc/list.go:461
+#: lxc/list.go:465
 msgid "IPV4"
 msgstr ""
 
-#: lxc/list.go:462
+#: lxc/list.go:466
 msgid "IPV6"
 msgstr ""
 
@@ -604,7 +612,7 @@ msgstr ""
 msgid "Input data"
 msgstr ""
 
-#: lxc/init.go:147
+#: lxc/init.go:148
 msgid "Instance type"
 msgstr ""
 
@@ -641,7 +649,7 @@ msgstr ""
 msgid "Invalid target %s"
 msgstr ""
 
-#: lxc/info.go:153
+#: lxc/info.go:156
 msgid "Ips:"
 msgstr ""
 
@@ -649,7 +657,7 @@ msgstr ""
 msgid "Keep the image up to date after initial copy"
 msgstr ""
 
-#: lxc/list.go:466
+#: lxc/list.go:470
 msgid "LAST USED AT"
 msgstr ""
 
@@ -666,14 +674,18 @@ msgstr ""
 msgid "Last used: never"
 msgstr ""
 
-#: lxc/info.go:268
+#: lxc/info.go:271
 msgid "Log:"
 msgstr ""
 
-#: lxc/network.go:524
+#: lxc/network.go:546
 msgid "MANAGED"
 msgstr ""
 
+#: lxc/cluster.go:186
+msgid "MESSAGE"
+msgstr ""
+
 #: lxc/image.go:176
 msgid "Make image public"
 msgstr ""
@@ -682,23 +694,23 @@ msgstr ""
 msgid "Make the image public"
 msgstr ""
 
-#: lxc/info.go:190
+#: lxc/info.go:193
 msgid "Memory (current)"
 msgstr ""
 
-#: lxc/info.go:194
+#: lxc/info.go:197
 msgid "Memory (peak)"
 msgstr ""
 
-#: lxc/info.go:206
+#: lxc/info.go:209
 msgid "Memory usage:"
 msgstr ""
 
-#: lxc/utils.go:258
+#: lxc/utils.go:259
 msgid "Missing summary."
 msgstr ""
 
-#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:412 lxc/storage.go:532
+#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:414 lxc/storage.go:544
 msgid "More than one device matches, specify the device name."
 msgstr ""
 
@@ -718,40 +730,49 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/list.go:467 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409
-#: lxc/storage.go:691 lxc/storage.go:880
+#: lxc/cluster.go:182 lxc/list.go:471 lxc/network.go:544 lxc/profile.go:573
+#: lxc/remote.go:409 lxc/storage.go:710 lxc/storage.go:915
 msgid "NAME"
 msgstr ""
 
-#: lxc/network.go:508 lxc/operation.go:139 lxc/remote.go:380 lxc/remote.go:385
+#: lxc/network.go:526 lxc/operation.go:139 lxc/remote.go:380 lxc/remote.go:385
 msgid "NO"
 msgstr ""
 
+#: lxc/list.go:492 lxc/storage.go:920
+msgid "NODE"
+msgstr ""
+
 #: lxc/info.go:117
 #, c-format
 msgid "Name: %s"
 msgstr ""
 
-#: lxc/network.go:258
+#: lxc/network.go:271
 #, c-format
 msgid "Network %s created"
 msgstr ""
 
-#: lxc/network.go:374
+#: lxc/network.go:388
 #, c-format
 msgid "Network %s deleted"
 msgstr ""
 
-#: lxc/network.go:451
+#: lxc/network.go:269
+#, c-format
+msgid "Network %s pending on node %s"
+msgstr ""
+
+#: lxc/network.go:465
 #, c-format
 msgid "Network %s renamed to %s"
 msgstr ""
 
-#: lxc/init.go:143 lxc/init.go:144
+#: lxc/init.go:144 lxc/init.go:145
 msgid "Network name"
 msgstr ""
 
-#: lxc/info.go:223
+#: lxc/info.go:226
 msgid "Network usage:"
 msgstr ""
 
@@ -763,11 +784,11 @@ msgstr ""
 msgid "No certificate provided to add"
 msgstr ""
 
-#: lxc/network.go:293 lxc/network.go:346
+#: lxc/network.go:307 lxc/network.go:360
 msgid "No device found for this network"
 msgstr ""
 
-#: lxc/storage.go:421 lxc/storage.go:541
+#: lxc/storage.go:423 lxc/storage.go:553
 msgid "No device found for this storage volume."
 msgstr ""
 
@@ -775,7 +796,26 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/storage.go:365 lxc/storage.go:458
+#: lxc/cluster.go:142
+#, c-format
+msgid "Node %s removed"
+msgstr ""
+
+#: lxc/cluster.go:118
+#, c-format
+msgid "Node %s renamed to %s"
+msgstr ""
+
+#: lxc/init.go:149 lxc/network.go:102 lxc/storage.go:158
+msgid "Node name"
+msgstr ""
+
+#: lxc/info.go:119
+#, c-format
+msgid "Node: %s"
+msgstr ""
+
+#: lxc/storage.go:367 lxc/storage.go:460
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
 
@@ -787,7 +827,7 @@ msgstr ""
 msgid "Only https:// is supported for remote image import."
 msgstr ""
 
-#: lxc/network.go:402 lxc/network.go:546
+#: lxc/network.go:416 lxc/network.go:573
 msgid "Only managed networks can be modified."
 msgstr ""
 
@@ -804,19 +844,19 @@ msgstr ""
 msgid "Override the terminal mode (auto, interactive or non-interactive)"
 msgstr ""
 
-#: lxc/list.go:624
+#: lxc/list.go:640
 msgid "PERSISTENT"
 msgstr ""
 
-#: lxc/list.go:469
+#: lxc/list.go:473
 msgid "PID"
 msgstr ""
 
-#: lxc/list.go:468
+#: lxc/list.go:472
 msgid "PROCESSES"
 msgstr ""
 
-#: lxc/list.go:470
+#: lxc/list.go:474
 msgid "PROFILES"
 msgstr ""
 
@@ -828,11 +868,11 @@ msgstr ""
 msgid "PUBLIC"
 msgstr ""
 
-#: lxc/info.go:217
+#: lxc/info.go:220
 msgid "Packets received"
 msgstr ""
 
-#: lxc/info.go:218
+#: lxc/info.go:221
 msgid "Packets sent"
 msgstr ""
 
@@ -844,7 +884,7 @@ msgstr ""
 msgid "Path to an alternate server directory"
 msgstr ""
 
-#: lxc/main.go:238
+#: lxc/main.go:239
 msgid "Pause containers."
 msgstr ""
 
@@ -852,12 +892,12 @@ msgstr ""
 msgid "Permission denied, are you in the lxd group?"
 msgstr ""
 
-#: lxc/info.go:135
+#: lxc/info.go:138
 #, c-format
 msgid "Pid: %d"
 msgstr ""
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:615 lxc/storage.go:1068
+#: lxc/network.go:441 lxc/profile.go:276 lxc/storage.go:627 lxc/storage.go:1139
 msgid "Press enter to open the editor again"
 msgstr ""
 
@@ -882,7 +922,7 @@ msgstr ""
 msgid "Print verbose information"
 msgstr ""
 
-#: lxc/info.go:159
+#: lxc/info.go:162
 #, c-format
 msgid "Processes: %d"
 msgstr ""
@@ -912,7 +952,7 @@ msgstr ""
 msgid "Profile %s renamed to %s"
 msgstr ""
 
-#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:139 lxc/init.go:140
+#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:140 lxc/init.go:141
 msgid "Profile to apply to the new container"
 msgstr ""
 
@@ -921,7 +961,7 @@ msgstr ""
 msgid "Profiles %s applied to %s"
 msgstr ""
 
-#: lxc/info.go:133
+#: lxc/info.go:136
 #, c-format
 msgid "Profiles: %s"
 msgstr ""
@@ -952,11 +992,11 @@ msgstr ""
 msgid "Remote admin password"
 msgstr ""
 
-#: lxc/utils.go:366
+#: lxc/utils.go:367
 msgid "Remote operation canceled by user"
 msgstr ""
 
-#: lxc/info.go:119
+#: lxc/info.go:122
 #, c-format
 msgid "Remote: %s"
 msgstr ""
@@ -966,7 +1006,7 @@ msgstr ""
 msgid "Remove %s (yes/no): "
 msgstr ""
 
-#: lxc/storage.go:1099
+#: lxc/storage.go:1176
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr ""
@@ -975,11 +1015,11 @@ msgstr ""
 msgid "Require user confirmation"
 msgstr ""
 
-#: lxc/info.go:156
+#: lxc/info.go:159
 msgid "Resources:"
 msgstr ""
 
-#: lxc/main.go:246
+#: lxc/main.go:247
 msgid "Restart containers."
 msgstr ""
 
@@ -987,7 +1027,7 @@ msgstr ""
 msgid "Retrieve the container's console log"
 msgstr ""
 
-#: lxc/init.go:289
+#: lxc/init.go:292
 #, c-format
 msgid "Retrieving image: %s"
 msgstr ""
@@ -1000,15 +1040,15 @@ msgstr ""
 msgid "SIZE"
 msgstr ""
 
-#: lxc/list.go:471
+#: lxc/list.go:475
 msgid "SNAPSHOTS"
 msgstr ""
 
-#: lxc/storage.go:694
+#: lxc/storage.go:717
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/list.go:472
+#: lxc/cluster.go:185 lxc/list.go:476 lxc/network.go:551 lxc/storage.go:715
 msgid "STATE"
 msgstr ""
 
@@ -1020,7 +1060,7 @@ msgstr ""
 msgid "STATUS"
 msgstr ""
 
-#: lxc/list.go:474
+#: lxc/list.go:478
 msgid "STORAGE POOL"
 msgstr ""
 
@@ -1072,11 +1112,11 @@ msgstr ""
 msgid "Show the resources available to the server"
 msgstr ""
 
-#: lxc/storage.go:155
+#: lxc/storage.go:156
 msgid "Show the resources available to the storage pool"
 msgstr ""
 
-#: lxc/storage.go:156
+#: lxc/storage.go:157
 msgid "Show the used and free space in bytes"
 msgstr ""
 
@@ -1085,7 +1125,7 @@ msgstr ""
 msgid "Size: %.2fMB"
 msgstr ""
 
-#: lxc/info.go:237
+#: lxc/info.go:240
 msgid "Snapshots:"
 msgstr ""
 
@@ -1098,7 +1138,7 @@ msgstr ""
 msgid "Source:"
 msgstr ""
 
-#: lxc/main.go:256
+#: lxc/main.go:257
 msgid "Start containers."
 msgstr ""
 
@@ -1107,12 +1147,12 @@ msgstr ""
 msgid "Starting %s"
 msgstr ""
 
-#: lxc/info.go:127
+#: lxc/info.go:130
 #, c-format
 msgid "Status: %s"
 msgstr ""
 
-#: lxc/main.go:262
+#: lxc/main.go:263
 msgid "Stop containers."
 msgstr ""
 
@@ -1129,26 +1169,31 @@ msgstr ""
 msgid "Stopping the container failed: %s"
 msgstr ""
 
-#: lxc/storage.go:506
+#: lxc/storage.go:517
 #, c-format
 msgid "Storage pool %s created"
 msgstr ""
 
-#: lxc/storage.go:565
+#: lxc/storage.go:577
 #, c-format
 msgid "Storage pool %s deleted"
 msgstr ""
 
-#: lxc/init.go:145 lxc/init.go:146
+#: lxc/storage.go:515
+#, c-format
+msgid "Storage pool %s pending on node %s"
+msgstr ""
+
+#: lxc/init.go:146 lxc/init.go:147
 msgid "Storage pool name"
 msgstr ""
 
-#: lxc/storage.go:914
+#: lxc/storage.go:959
 #, c-format
 msgid "Storage volume %s created"
 msgstr ""
 
-#: lxc/storage.go:929
+#: lxc/storage.go:979
 #, c-format
 msgid "Storage volume %s deleted"
 msgstr ""
@@ -1157,15 +1202,15 @@ msgstr ""
 msgid "Store the container state (only for stop)"
 msgstr ""
 
-#: lxc/info.go:198
+#: lxc/info.go:201
 msgid "Swap (current)"
 msgstr ""
 
-#: lxc/info.go:202
+#: lxc/info.go:205
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:473 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:879
+#: lxc/list.go:477 lxc/network.go:545 lxc/operation.go:153 lxc/storage.go:914
 msgid "TYPE"
 msgstr ""
 
@@ -1179,7 +1224,7 @@ msgid ""
 "restarted."
 msgstr ""
 
-#: lxc/init.go:362
+#: lxc/init.go:365
 msgid "The container you are starting doesn't have any network attached to it."
 msgstr ""
 
@@ -1192,7 +1237,7 @@ msgstr ""
 msgid "The device doesn't exist"
 msgstr ""
 
-#: lxc/init.go:346
+#: lxc/init.go:349
 #, c-format
 msgid "The local image '%s' couldn't be found, trying '%s:' instead."
 msgstr ""
@@ -1201,11 +1246,11 @@ msgstr ""
 msgid "The opposite of \"lxc pause\" is \"lxc start\"."
 msgstr ""
 
-#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:426 lxc/storage.go:546
+#: lxc/network.go:312 lxc/network.go:365 lxc/storage.go:428 lxc/storage.go:558
 msgid "The specified device doesn't exist"
 msgstr ""
 
-#: lxc/network.go:302 lxc/network.go:355
+#: lxc/network.go:316 lxc/network.go:369
 msgid "The specified device doesn't match the network"
 msgstr ""
 
@@ -1229,11 +1274,11 @@ msgstr ""
 msgid "Timestamps:"
 msgstr ""
 
-#: lxc/init.go:364
+#: lxc/init.go:367
 msgid "To attach a network to a container, use: lxc network attach"
 msgstr ""
 
-#: lxc/init.go:363
+#: lxc/init.go:366
 msgid "To create a new network, use: lxc network create"
 msgstr ""
 
@@ -1264,11 +1309,11 @@ msgstr ""
 msgid "Try `lxc info --show-log %s` for more info"
 msgstr ""
 
-#: lxc/info.go:129
+#: lxc/info.go:132
 msgid "Type: ephemeral"
 msgstr ""
 
-#: lxc/info.go:131
+#: lxc/info.go:134
 msgid "Type: persistent"
 msgstr ""
 
@@ -1276,11 +1321,11 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/remote.go:410
+#: lxc/cluster.go:183 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:695 lxc/storage.go:882
+#: lxc/network.go:548 lxc/profile.go:574 lxc/storage.go:719 lxc/storage.go:917
 msgid "USED BY"
 msgstr ""
 
@@ -1310,6 +1355,25 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
+#: lxc/cluster.go:22
+msgid ""
+"Usage: lxc cluster <subcommand> [options]\n"
+"\n"
+"Manage cluster nodes.\n"
+"\n"
+"lxc cluster list [<remote>:]\n"
+"    List all nodes in the cluster.\n"
+"\n"
+"lxc cluster show [<remote>:]<node>\n"
+"    Show details of a node.\n"
+"\n"
+"lxc cluster rename [<remote>:]<node> <new-name>\n"
+"    Rename a cluster node.\n"
+"\n"
+"lxc cluster delete [<remote>:]<node> [--force]\n"
+"    Delete a node from the cluster."
+msgstr ""
+
 #: lxc/config.go:85
 msgid ""
 "Usage: lxc config <subcommand> [options]\n"
@@ -1626,11 +1690,12 @@ msgid ""
 "    For LXD server information."
 msgstr ""
 
-#: lxc/init.go:77
+#: lxc/init.go:78
 msgid ""
 "Usage: lxc init [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create containers from images.\n"
 "\n"
@@ -1645,7 +1710,8 @@ msgstr ""
 msgid ""
 "Usage: lxc launch [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create and start containers from images.\n"
 "\n"
@@ -1726,6 +1792,8 @@ msgid ""
 "\n"
 "\tt - Type (persistent or ephemeral)\n"
 "\n"
+"\tH - Node hosting the container\n"
+"\n"
 "Custom columns are defined with \"key[:name][:maxWidth]\":\n"
 "\n"
 "\tKEY: The (extended) config key to display\n"
@@ -1791,7 +1859,7 @@ msgid ""
 "    Rename a snapshot."
 msgstr ""
 
-#: lxc/network.go:50
+#: lxc/network.go:52
 msgid ""
 "Usage: lxc network <subcommand> [options]\n"
 "\n"
@@ -1800,13 +1868,13 @@ msgid ""
 "lxc network list [<remote>:]\n"
 "    List available networks.\n"
 "\n"
-"lxc network show [<remote>:]<network>\n"
+"lxc network show [<remote>:]<network> [--target <node>]\n"
 "    Show details of a network.\n"
 "\n"
-"lxc network create [<remote>:]<network> [key=value...]\n"
+"lxc network create [<remote>:]<network> [key=value...] [--target <node>]\n"
 "    Create a network.\n"
 "\n"
-"lxc network get [<remote>:]<network> <key>\n"
+"lxc network get [<remote>:]<network> <key> [--target <node>]\n"
 "    Get network configuration.\n"
 "\n"
 "lxc network set [<remote>:]<network> <key> <value>\n"
@@ -2039,7 +2107,7 @@ msgid ""
 "    Create a snapshot of \"u1\" called \"snap0\"."
 msgstr ""
 
-#: lxc/storage.go:65
+#: lxc/storage.go:66
 msgid ""
 "Usage: lxc storage <subcommand> [options]\n"
 "\n"
@@ -2049,13 +2117,14 @@ msgid ""
 "lxc storage list [<remote>:]\n"
 "    List available storage pools.\n"
 "\n"
-"lxc storage show [<remote>:]<pool> [--resources]\n"
+"lxc storage show [<remote>:]<pool> [--resources] [--target <node>]\n"
 "    Show details of a storage pool.\n"
 "\n"
 "lxc storage info [<remote>:]<pool> [--bytes]\n"
 "    Show information of a storage pool in yaml format.\n"
 "\n"
-"lxc storage create [<remote>:]<pool> <driver> [key=value]...\n"
+"lxc storage create [<remote>:]<pool> <driver> [key=value]... [--target "
+"<node>]\n"
 "    Create a storage pool.\n"
 "\n"
 "lxc storage get [<remote>:]<pool> <key>\n"
@@ -2078,28 +2147,31 @@ msgid ""
 "lxc storage volume list [<remote>:]<pool>\n"
 "    List available storage volumes on a storage pool.\n"
 "\n"
-"lxc storage volume show [<remote>:]<pool> <volume>\n"
-"    Show details of a storage volume on a storage pool.\n"
+"lxc storage volume show [<remote>:]<pool> <volume> [--target <node>]\n"
+"   Show details of a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume create [<remote>:]<pool> <volume> [key=value]...\n"
+"lxc storage volume create [<remote>:]<pool> <volume> [key=value]... [--"
+"target <node>]\n"
 "    Create a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume rename [<remote>:]<pool> <old name> <new name>\n"
+"lxc storage volume rename [<remote>:]<pool> <old name> <new name> [--target "
+"<node>]\n"
 "    Rename a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume get [<remote>:]<pool> <volume> <key>\n"
+"lxc storage volume get [<remote>:]<pool> <volume> <key> [--target <node>]\n"
 "    Get storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume set [<remote>:]<pool> <volume> <key> <value>\n"
+"lxc storage volume set [<remote>:]<pool> <volume> <key> <value> [--target "
+"<node>]\n"
 "    Set storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume unset [<remote>:]<pool> <volume> <key>\n"
+"lxc storage volume unset [<remote>:]<pool> <volume> <key> [--target <node>]\n"
 "    Unset storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume delete [<remote>:]<pool> <volume>\n"
+"lxc storage volume delete [<remote>:]<pool> <volume> [--target <node>]\n"
 "    Delete a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume edit [<remote>:]<pool> <volume>\n"
+"lxc storage volume edit [<remote>:]<pool> <volume> [--target <node>]\n"
 "    Edit storage volume, either by launching external editor or reading "
 "STDIN.\n"
 "\n"
@@ -2149,7 +2221,7 @@ msgstr ""
 msgid "User aborted delete operation."
 msgstr ""
 
-#: lxc/utils.go:371
+#: lxc/utils.go:372
 msgid ""
 "User signaled us three times, exiting. The remote operation will keep "
 "running."
@@ -2169,7 +2241,7 @@ msgstr ""
 msgid "Whether or not to snapshot the container's running state"
 msgstr ""
 
-#: lxc/network.go:510 lxc/operation.go:141 lxc/remote.go:382 lxc/remote.go:387
+#: lxc/network.go:528 lxc/operation.go:141 lxc/remote.go:382 lxc/remote.go:387
 msgid "YES"
 msgstr ""
 
@@ -2201,11 +2273,11 @@ msgstr ""
 msgid "default"
 msgstr ""
 
-#: lxc/storage.go:804
+#: lxc/storage.go:835
 msgid "description"
 msgstr ""
 
-#: lxc/init.go:311
+#: lxc/init.go:314
 msgid "didn't get any affected image, container or snapshot from server"
 msgstr ""
 
@@ -2213,7 +2285,7 @@ msgstr ""
 msgid "disabled"
 msgstr ""
 
-#: lxc/storage.go:803
+#: lxc/storage.go:834
 msgid "driver"
 msgstr ""
 
@@ -2231,11 +2303,11 @@ msgstr ""
 msgid "error: unknown command: %s"
 msgstr ""
 
-#: lxc/storage.go:801
+#: lxc/storage.go:832
 msgid "info"
 msgstr ""
 
-#: lxc/storage.go:802
+#: lxc/storage.go:833
 msgid "name"
 msgstr ""
 
@@ -2247,7 +2319,7 @@ msgstr ""
 msgid "ok (y/n)?"
 msgstr ""
 
-#: lxc/main.go:366 lxc/main.go:370
+#: lxc/main.go:367 lxc/main.go:371
 #, c-format
 msgid "processing aliases failed %s\n"
 msgstr ""
@@ -2276,32 +2348,32 @@ msgstr ""
 msgid "remote %s is static and cannot be modified"
 msgstr ""
 
-#: lxc/storage.go:806
+#: lxc/storage.go:837
 msgid "space used"
 msgstr ""
 
-#: lxc/info.go:248
+#: lxc/info.go:251
 msgid "stateful"
 msgstr ""
 
-#: lxc/info.go:250
+#: lxc/info.go:253
 msgid "stateless"
 msgstr ""
 
-#: lxc/info.go:244
+#: lxc/info.go:247
 #, c-format
 msgid "taken at %s"
 msgstr ""
 
-#: lxc/storage.go:805
+#: lxc/storage.go:836
 msgid "total space"
 msgstr ""
 
-#: lxc/storage.go:800
+#: lxc/storage.go:831
 msgid "used by"
 msgstr ""
 
-#: lxc/main.go:297
+#: lxc/main.go:298
 msgid "wrong number of subcommand arguments"
 msgstr ""
 
diff --git a/po/zh.po b/po/zh.po
index 4565d52a9..8dbca1ce5 100644
--- a/po/zh.po
+++ b/po/zh.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2018-01-21 21:48+0000\n"
+"POT-Creation-Date: 2018-01-24 13:34+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -16,7 +16,7 @@ msgstr ""
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
 
-#: lxc/storage.go:34
+#: lxc/storage.go:35
 msgid ""
 "### This is a yaml representation of a storage pool.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -33,7 +33,7 @@ msgid ""
 "###   zfs.pool_name: default"
 msgstr ""
 
-#: lxc/storage.go:51
+#: lxc/storage.go:52
 msgid ""
 "### This is a yaml representation of a storage volume.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -102,7 +102,7 @@ msgid ""
 "###  description: My custom image"
 msgstr ""
 
-#: lxc/network.go:30
+#: lxc/network.go:32
 msgid ""
 "### This is a yaml representation of the network.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -153,7 +153,7 @@ msgstr ""
 msgid "%s is not a directory"
 msgstr ""
 
-#: lxc/utils.go:375
+#: lxc/utils.go:376
 #, c-format
 msgid "%v (interrupt two more times to force)"
 msgstr ""
@@ -179,7 +179,7 @@ msgstr ""
 msgid "ARCH"
 msgstr ""
 
-#: lxc/list.go:463
+#: lxc/list.go:467
 msgid "ARCHITECTURE"
 msgstr ""
 
@@ -204,7 +204,7 @@ msgstr ""
 msgid "Aliases:"
 msgstr ""
 
-#: lxc/image.go:600 lxc/info.go:122
+#: lxc/image.go:600 lxc/info.go:125
 #, c-format
 msgid "Architecture: %s"
 msgstr ""
@@ -228,11 +228,11 @@ msgstr ""
 msgid "Both --all and container name given"
 msgstr ""
 
-#: lxc/info.go:215
+#: lxc/info.go:218
 msgid "Bytes received"
 msgstr ""
 
-#: lxc/info.go:216
+#: lxc/info.go:219
 msgid "Bytes sent"
 msgstr ""
 
@@ -244,11 +244,11 @@ msgstr ""
 msgid "COMMON NAME"
 msgstr ""
 
-#: lxc/info.go:179
+#: lxc/info.go:182
 msgid "CPU usage (in seconds)"
 msgstr ""
 
-#: lxc/info.go:183
+#: lxc/info.go:186
 msgid "CPU usage:"
 msgstr ""
 
@@ -256,7 +256,7 @@ msgstr ""
 msgid "CREATED"
 msgstr ""
 
-#: lxc/list.go:464
+#: lxc/list.go:468
 msgid "CREATED AT"
 msgstr ""
 
@@ -269,7 +269,7 @@ msgstr ""
 msgid "Can't pull a directory without --recursive"
 msgstr ""
 
-#: lxc/config.go:207 lxc/network.go:560
+#: lxc/config.go:207 lxc/network.go:587
 #, c-format
 msgid "Can't read from stdin: %s"
 msgstr ""
@@ -288,7 +288,7 @@ msgstr ""
 msgid "Can't unset key '%s', it's not currently set."
 msgstr ""
 
-#: lxc/profile.go:546 lxc/storage.go:663
+#: lxc/profile.go:546 lxc/storage.go:675
 msgid "Cannot provide container name to list"
 msgstr ""
 
@@ -301,7 +301,7 @@ msgstr ""
 msgid "Client certificate stored at server: "
 msgstr ""
 
-#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:130 lxc/list.go:131
+#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:134 lxc/list.go:135
 msgid "Columns"
 msgstr ""
 
@@ -309,12 +309,12 @@ msgstr ""
 msgid "Commands:"
 msgstr ""
 
-#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:137 lxc/init.go:138
+#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:138 lxc/init.go:139
 msgid "Config key/value to apply to the new container"
 msgstr ""
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:614 lxc/storage.go:1067
+#: lxc/network.go:440 lxc/profile.go:275 lxc/storage.go:626 lxc/storage.go:1138
 #, c-format
 msgid "Config parsing error: %s"
 msgstr ""
@@ -331,7 +331,7 @@ msgstr ""
 msgid "Container name is mandatory"
 msgstr ""
 
-#: lxc/copy.go:229 lxc/init.go:317
+#: lxc/copy.go:229 lxc/init.go:320
 #, c-format
 msgid "Container name is: %s"
 msgstr ""
@@ -366,26 +366,30 @@ msgstr ""
 msgid "Create any directories necessary"
 msgstr ""
 
-#: lxc/image.go:605 lxc/info.go:124
+#: lxc/image.go:605 lxc/info.go:127
 #, c-format
 msgid "Created: %s"
 msgstr ""
 
-#: lxc/init.go:196
+#: lxc/init.go:199
 #, c-format
 msgid "Creating %s"
 msgstr ""
 
-#: lxc/init.go:194
+#: lxc/init.go:197
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:465 lxc/network.go:525
-#: lxc/storage.go:692 lxc/storage.go:881
+#: lxc/cluster.go:184
+msgid "DATABASE"
+msgstr ""
+
+#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:469 lxc/network.go:547
+#: lxc/storage.go:711 lxc/storage.go:916
 msgid "DESCRIPTION"
 msgstr ""
 
-#: lxc/storage.go:693
+#: lxc/storage.go:712
 msgid "DRIVER"
 msgstr ""
 
@@ -403,7 +407,7 @@ msgstr ""
 msgid "Device %s removed from %s"
 msgstr ""
 
-#: lxc/utils.go:307 lxc/utils.go:331
+#: lxc/utils.go:308 lxc/utils.go:332
 #, c-format
 msgid "Device already exists: %s"
 msgstr ""
@@ -420,11 +424,11 @@ msgstr ""
 msgid "Disable stdin (reads from /dev/null)"
 msgstr ""
 
-#: lxc/info.go:172
+#: lxc/info.go:175
 msgid "Disk usage:"
 msgstr ""
 
-#: lxc/list.go:622
+#: lxc/list.go:638
 msgid "EPHEMERAL"
 msgstr ""
 
@@ -448,7 +452,7 @@ msgstr ""
 msgid "Environment:"
 msgstr ""
 
-#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:141 lxc/init.go:142
+#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:142 lxc/init.go:143
 msgid "Ephemeral container"
 msgstr ""
 
@@ -483,7 +487,7 @@ msgstr ""
 msgid "FINGERPRINT"
 msgstr ""
 
-#: lxc/utils.go:413
+#: lxc/utils.go:414
 #, c-format
 msgid "Failed to create alias %s"
 msgstr ""
@@ -502,7 +506,7 @@ msgstr ""
 msgid "Failed to get the new container name"
 msgstr ""
 
-#: lxc/utils.go:403
+#: lxc/utils.go:404
 #, c-format
 msgid "Failed to remove alias %s"
 msgstr ""
@@ -512,11 +516,11 @@ msgstr ""
 msgid "Failed to walk path for %s: %s"
 msgstr ""
 
-#: lxc/list.go:133
+#: lxc/list.go:137
 msgid "Fast mode (same as --columns=nsacPt)"
 msgstr ""
 
-#: lxc/network.go:486 lxc/operation.go:121
+#: lxc/network.go:504 lxc/operation.go:121
 msgid "Filtering isn't supported yet"
 msgstr ""
 
@@ -529,6 +533,10 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
+#: lxc/cluster.go:41
+msgid "Force removing a node, even if degraded"
+msgstr ""
+
 #: lxc/action.go:47 lxc/action.go:48
 msgid "Force the container to shutdown"
 msgstr ""
@@ -541,7 +549,7 @@ msgstr ""
 msgid "Force using the local unix socket"
 msgstr ""
 
-#: lxc/image.go:180 lxc/list.go:132
+#: lxc/image.go:180 lxc/list.go:136
 msgid "Format (csv|json|table|yaml)"
 msgstr ""
 
@@ -553,11 +561,11 @@ msgstr ""
 msgid "ID"
 msgstr ""
 
-#: lxc/list.go:461
+#: lxc/list.go:465
 msgid "IPV4"
 msgstr ""
 
-#: lxc/list.go:462
+#: lxc/list.go:466
 msgid "IPV6"
 msgstr ""
 
@@ -604,7 +612,7 @@ msgstr ""
 msgid "Input data"
 msgstr ""
 
-#: lxc/init.go:147
+#: lxc/init.go:148
 msgid "Instance type"
 msgstr ""
 
@@ -641,7 +649,7 @@ msgstr ""
 msgid "Invalid target %s"
 msgstr ""
 
-#: lxc/info.go:153
+#: lxc/info.go:156
 msgid "Ips:"
 msgstr ""
 
@@ -649,7 +657,7 @@ msgstr ""
 msgid "Keep the image up to date after initial copy"
 msgstr ""
 
-#: lxc/list.go:466
+#: lxc/list.go:470
 msgid "LAST USED AT"
 msgstr ""
 
@@ -666,14 +674,18 @@ msgstr ""
 msgid "Last used: never"
 msgstr ""
 
-#: lxc/info.go:268
+#: lxc/info.go:271
 msgid "Log:"
 msgstr ""
 
-#: lxc/network.go:524
+#: lxc/network.go:546
 msgid "MANAGED"
 msgstr ""
 
+#: lxc/cluster.go:186
+msgid "MESSAGE"
+msgstr ""
+
 #: lxc/image.go:176
 msgid "Make image public"
 msgstr ""
@@ -682,23 +694,23 @@ msgstr ""
 msgid "Make the image public"
 msgstr ""
 
-#: lxc/info.go:190
+#: lxc/info.go:193
 msgid "Memory (current)"
 msgstr ""
 
-#: lxc/info.go:194
+#: lxc/info.go:197
 msgid "Memory (peak)"
 msgstr ""
 
-#: lxc/info.go:206
+#: lxc/info.go:209
 msgid "Memory usage:"
 msgstr ""
 
-#: lxc/utils.go:258
+#: lxc/utils.go:259
 msgid "Missing summary."
 msgstr ""
 
-#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:412 lxc/storage.go:532
+#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:414 lxc/storage.go:544
 msgid "More than one device matches, specify the device name."
 msgstr ""
 
@@ -718,40 +730,49 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/list.go:467 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409
-#: lxc/storage.go:691 lxc/storage.go:880
+#: lxc/cluster.go:182 lxc/list.go:471 lxc/network.go:544 lxc/profile.go:573
+#: lxc/remote.go:409 lxc/storage.go:710 lxc/storage.go:915
 msgid "NAME"
 msgstr ""
 
-#: lxc/network.go:508 lxc/operation.go:139 lxc/remote.go:380 lxc/remote.go:385
+#: lxc/network.go:526 lxc/operation.go:139 lxc/remote.go:380 lxc/remote.go:385
 msgid "NO"
 msgstr ""
 
+#: lxc/list.go:492 lxc/storage.go:920
+msgid "NODE"
+msgstr ""
+
 #: lxc/info.go:117
 #, c-format
 msgid "Name: %s"
 msgstr ""
 
-#: lxc/network.go:258
+#: lxc/network.go:271
 #, c-format
 msgid "Network %s created"
 msgstr ""
 
-#: lxc/network.go:374
+#: lxc/network.go:388
 #, c-format
 msgid "Network %s deleted"
 msgstr ""
 
-#: lxc/network.go:451
+#: lxc/network.go:269
+#, c-format
+msgid "Network %s pending on node %s"
+msgstr ""
+
+#: lxc/network.go:465
 #, c-format
 msgid "Network %s renamed to %s"
 msgstr ""
 
-#: lxc/init.go:143 lxc/init.go:144
+#: lxc/init.go:144 lxc/init.go:145
 msgid "Network name"
 msgstr ""
 
-#: lxc/info.go:223
+#: lxc/info.go:226
 msgid "Network usage:"
 msgstr ""
 
@@ -763,11 +784,11 @@ msgstr ""
 msgid "No certificate provided to add"
 msgstr ""
 
-#: lxc/network.go:293 lxc/network.go:346
+#: lxc/network.go:307 lxc/network.go:360
 msgid "No device found for this network"
 msgstr ""
 
-#: lxc/storage.go:421 lxc/storage.go:541
+#: lxc/storage.go:423 lxc/storage.go:553
 msgid "No device found for this storage volume."
 msgstr ""
 
@@ -775,7 +796,26 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/storage.go:365 lxc/storage.go:458
+#: lxc/cluster.go:142
+#, c-format
+msgid "Node %s removed"
+msgstr ""
+
+#: lxc/cluster.go:118
+#, c-format
+msgid "Node %s renamed to %s"
+msgstr ""
+
+#: lxc/init.go:149 lxc/network.go:102 lxc/storage.go:158
+msgid "Node name"
+msgstr ""
+
+#: lxc/info.go:119
+#, c-format
+msgid "Node: %s"
+msgstr ""
+
+#: lxc/storage.go:367 lxc/storage.go:460
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
 
@@ -787,7 +827,7 @@ msgstr ""
 msgid "Only https:// is supported for remote image import."
 msgstr ""
 
-#: lxc/network.go:402 lxc/network.go:546
+#: lxc/network.go:416 lxc/network.go:573
 msgid "Only managed networks can be modified."
 msgstr ""
 
@@ -804,19 +844,19 @@ msgstr ""
 msgid "Override the terminal mode (auto, interactive or non-interactive)"
 msgstr ""
 
-#: lxc/list.go:624
+#: lxc/list.go:640
 msgid "PERSISTENT"
 msgstr ""
 
-#: lxc/list.go:469
+#: lxc/list.go:473
 msgid "PID"
 msgstr ""
 
-#: lxc/list.go:468
+#: lxc/list.go:472
 msgid "PROCESSES"
 msgstr ""
 
-#: lxc/list.go:470
+#: lxc/list.go:474
 msgid "PROFILES"
 msgstr ""
 
@@ -828,11 +868,11 @@ msgstr ""
 msgid "PUBLIC"
 msgstr ""
 
-#: lxc/info.go:217
+#: lxc/info.go:220
 msgid "Packets received"
 msgstr ""
 
-#: lxc/info.go:218
+#: lxc/info.go:221
 msgid "Packets sent"
 msgstr ""
 
@@ -844,7 +884,7 @@ msgstr ""
 msgid "Path to an alternate server directory"
 msgstr ""
 
-#: lxc/main.go:238
+#: lxc/main.go:239
 msgid "Pause containers."
 msgstr ""
 
@@ -852,12 +892,12 @@ msgstr ""
 msgid "Permission denied, are you in the lxd group?"
 msgstr ""
 
-#: lxc/info.go:135
+#: lxc/info.go:138
 #, c-format
 msgid "Pid: %d"
 msgstr ""
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:615 lxc/storage.go:1068
+#: lxc/network.go:441 lxc/profile.go:276 lxc/storage.go:627 lxc/storage.go:1139
 msgid "Press enter to open the editor again"
 msgstr ""
 
@@ -882,7 +922,7 @@ msgstr ""
 msgid "Print verbose information"
 msgstr ""
 
-#: lxc/info.go:159
+#: lxc/info.go:162
 #, c-format
 msgid "Processes: %d"
 msgstr ""
@@ -912,7 +952,7 @@ msgstr ""
 msgid "Profile %s renamed to %s"
 msgstr ""
 
-#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:139 lxc/init.go:140
+#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:140 lxc/init.go:141
 msgid "Profile to apply to the new container"
 msgstr ""
 
@@ -921,7 +961,7 @@ msgstr ""
 msgid "Profiles %s applied to %s"
 msgstr ""
 
-#: lxc/info.go:133
+#: lxc/info.go:136
 #, c-format
 msgid "Profiles: %s"
 msgstr ""
@@ -952,11 +992,11 @@ msgstr ""
 msgid "Remote admin password"
 msgstr ""
 
-#: lxc/utils.go:366
+#: lxc/utils.go:367
 msgid "Remote operation canceled by user"
 msgstr ""
 
-#: lxc/info.go:119
+#: lxc/info.go:122
 #, c-format
 msgid "Remote: %s"
 msgstr ""
@@ -966,7 +1006,7 @@ msgstr ""
 msgid "Remove %s (yes/no): "
 msgstr ""
 
-#: lxc/storage.go:1099
+#: lxc/storage.go:1176
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr ""
@@ -975,11 +1015,11 @@ msgstr ""
 msgid "Require user confirmation"
 msgstr ""
 
-#: lxc/info.go:156
+#: lxc/info.go:159
 msgid "Resources:"
 msgstr ""
 
-#: lxc/main.go:246
+#: lxc/main.go:247
 msgid "Restart containers."
 msgstr ""
 
@@ -987,7 +1027,7 @@ msgstr ""
 msgid "Retrieve the container's console log"
 msgstr ""
 
-#: lxc/init.go:289
+#: lxc/init.go:292
 #, c-format
 msgid "Retrieving image: %s"
 msgstr ""
@@ -1000,15 +1040,15 @@ msgstr ""
 msgid "SIZE"
 msgstr ""
 
-#: lxc/list.go:471
+#: lxc/list.go:475
 msgid "SNAPSHOTS"
 msgstr ""
 
-#: lxc/storage.go:694
+#: lxc/storage.go:717
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/list.go:472
+#: lxc/cluster.go:185 lxc/list.go:476 lxc/network.go:551 lxc/storage.go:715
 msgid "STATE"
 msgstr ""
 
@@ -1020,7 +1060,7 @@ msgstr ""
 msgid "STATUS"
 msgstr ""
 
-#: lxc/list.go:474
+#: lxc/list.go:478
 msgid "STORAGE POOL"
 msgstr ""
 
@@ -1072,11 +1112,11 @@ msgstr ""
 msgid "Show the resources available to the server"
 msgstr ""
 
-#: lxc/storage.go:155
+#: lxc/storage.go:156
 msgid "Show the resources available to the storage pool"
 msgstr ""
 
-#: lxc/storage.go:156
+#: lxc/storage.go:157
 msgid "Show the used and free space in bytes"
 msgstr ""
 
@@ -1085,7 +1125,7 @@ msgstr ""
 msgid "Size: %.2fMB"
 msgstr ""
 
-#: lxc/info.go:237
+#: lxc/info.go:240
 msgid "Snapshots:"
 msgstr ""
 
@@ -1098,7 +1138,7 @@ msgstr ""
 msgid "Source:"
 msgstr ""
 
-#: lxc/main.go:256
+#: lxc/main.go:257
 msgid "Start containers."
 msgstr ""
 
@@ -1107,12 +1147,12 @@ msgstr ""
 msgid "Starting %s"
 msgstr ""
 
-#: lxc/info.go:127
+#: lxc/info.go:130
 #, c-format
 msgid "Status: %s"
 msgstr ""
 
-#: lxc/main.go:262
+#: lxc/main.go:263
 msgid "Stop containers."
 msgstr ""
 
@@ -1129,26 +1169,31 @@ msgstr ""
 msgid "Stopping the container failed: %s"
 msgstr ""
 
-#: lxc/storage.go:506
+#: lxc/storage.go:517
 #, c-format
 msgid "Storage pool %s created"
 msgstr ""
 
-#: lxc/storage.go:565
+#: lxc/storage.go:577
 #, c-format
 msgid "Storage pool %s deleted"
 msgstr ""
 
-#: lxc/init.go:145 lxc/init.go:146
+#: lxc/storage.go:515
+#, c-format
+msgid "Storage pool %s pending on node %s"
+msgstr ""
+
+#: lxc/init.go:146 lxc/init.go:147
 msgid "Storage pool name"
 msgstr ""
 
-#: lxc/storage.go:914
+#: lxc/storage.go:959
 #, c-format
 msgid "Storage volume %s created"
 msgstr ""
 
-#: lxc/storage.go:929
+#: lxc/storage.go:979
 #, c-format
 msgid "Storage volume %s deleted"
 msgstr ""
@@ -1157,15 +1202,15 @@ msgstr ""
 msgid "Store the container state (only for stop)"
 msgstr ""
 
-#: lxc/info.go:198
+#: lxc/info.go:201
 msgid "Swap (current)"
 msgstr ""
 
-#: lxc/info.go:202
+#: lxc/info.go:205
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:473 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:879
+#: lxc/list.go:477 lxc/network.go:545 lxc/operation.go:153 lxc/storage.go:914
 msgid "TYPE"
 msgstr ""
 
@@ -1179,7 +1224,7 @@ msgid ""
 "restarted."
 msgstr ""
 
-#: lxc/init.go:362
+#: lxc/init.go:365
 msgid "The container you are starting doesn't have any network attached to it."
 msgstr ""
 
@@ -1192,7 +1237,7 @@ msgstr ""
 msgid "The device doesn't exist"
 msgstr ""
 
-#: lxc/init.go:346
+#: lxc/init.go:349
 #, c-format
 msgid "The local image '%s' couldn't be found, trying '%s:' instead."
 msgstr ""
@@ -1201,11 +1246,11 @@ msgstr ""
 msgid "The opposite of \"lxc pause\" is \"lxc start\"."
 msgstr ""
 
-#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:426 lxc/storage.go:546
+#: lxc/network.go:312 lxc/network.go:365 lxc/storage.go:428 lxc/storage.go:558
 msgid "The specified device doesn't exist"
 msgstr ""
 
-#: lxc/network.go:302 lxc/network.go:355
+#: lxc/network.go:316 lxc/network.go:369
 msgid "The specified device doesn't match the network"
 msgstr ""
 
@@ -1229,11 +1274,11 @@ msgstr ""
 msgid "Timestamps:"
 msgstr ""
 
-#: lxc/init.go:364
+#: lxc/init.go:367
 msgid "To attach a network to a container, use: lxc network attach"
 msgstr ""
 
-#: lxc/init.go:363
+#: lxc/init.go:366
 msgid "To create a new network, use: lxc network create"
 msgstr ""
 
@@ -1264,11 +1309,11 @@ msgstr ""
 msgid "Try `lxc info --show-log %s` for more info"
 msgstr ""
 
-#: lxc/info.go:129
+#: lxc/info.go:132
 msgid "Type: ephemeral"
 msgstr ""
 
-#: lxc/info.go:131
+#: lxc/info.go:134
 msgid "Type: persistent"
 msgstr ""
 
@@ -1276,11 +1321,11 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/remote.go:410
+#: lxc/cluster.go:183 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:695 lxc/storage.go:882
+#: lxc/network.go:548 lxc/profile.go:574 lxc/storage.go:719 lxc/storage.go:917
 msgid "USED BY"
 msgstr ""
 
@@ -1310,6 +1355,25 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
+#: lxc/cluster.go:22
+msgid ""
+"Usage: lxc cluster <subcommand> [options]\n"
+"\n"
+"Manage cluster nodes.\n"
+"\n"
+"lxc cluster list [<remote>:]\n"
+"    List all nodes in the cluster.\n"
+"\n"
+"lxc cluster show [<remote>:]<node>\n"
+"    Show details of a node.\n"
+"\n"
+"lxc cluster rename [<remote>:]<node> <new-name>\n"
+"    Rename a cluster node.\n"
+"\n"
+"lxc cluster delete [<remote>:]<node> [--force]\n"
+"    Delete a node from the cluster."
+msgstr ""
+
 #: lxc/config.go:85
 msgid ""
 "Usage: lxc config <subcommand> [options]\n"
@@ -1626,11 +1690,12 @@ msgid ""
 "    For LXD server information."
 msgstr ""
 
-#: lxc/init.go:77
+#: lxc/init.go:78
 msgid ""
 "Usage: lxc init [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create containers from images.\n"
 "\n"
@@ -1645,7 +1710,8 @@ msgstr ""
 msgid ""
 "Usage: lxc launch [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create and start containers from images.\n"
 "\n"
@@ -1726,6 +1792,8 @@ msgid ""
 "\n"
 "\tt - Type (persistent or ephemeral)\n"
 "\n"
+"\tH - Node hosting the container\n"
+"\n"
 "Custom columns are defined with \"key[:name][:maxWidth]\":\n"
 "\n"
 "\tKEY: The (extended) config key to display\n"
@@ -1791,7 +1859,7 @@ msgid ""
 "    Rename a snapshot."
 msgstr ""
 
-#: lxc/network.go:50
+#: lxc/network.go:52
 msgid ""
 "Usage: lxc network <subcommand> [options]\n"
 "\n"
@@ -1800,13 +1868,13 @@ msgid ""
 "lxc network list [<remote>:]\n"
 "    List available networks.\n"
 "\n"
-"lxc network show [<remote>:]<network>\n"
+"lxc network show [<remote>:]<network> [--target <node>]\n"
 "    Show details of a network.\n"
 "\n"
-"lxc network create [<remote>:]<network> [key=value...]\n"
+"lxc network create [<remote>:]<network> [key=value...] [--target <node>]\n"
 "    Create a network.\n"
 "\n"
-"lxc network get [<remote>:]<network> <key>\n"
+"lxc network get [<remote>:]<network> <key> [--target <node>]\n"
 "    Get network configuration.\n"
 "\n"
 "lxc network set [<remote>:]<network> <key> <value>\n"
@@ -2039,7 +2107,7 @@ msgid ""
 "    Create a snapshot of \"u1\" called \"snap0\"."
 msgstr ""
 
-#: lxc/storage.go:65
+#: lxc/storage.go:66
 msgid ""
 "Usage: lxc storage <subcommand> [options]\n"
 "\n"
@@ -2049,13 +2117,14 @@ msgid ""
 "lxc storage list [<remote>:]\n"
 "    List available storage pools.\n"
 "\n"
-"lxc storage show [<remote>:]<pool> [--resources]\n"
+"lxc storage show [<remote>:]<pool> [--resources] [--target <node>]\n"
 "    Show details of a storage pool.\n"
 "\n"
 "lxc storage info [<remote>:]<pool> [--bytes]\n"
 "    Show information of a storage pool in yaml format.\n"
 "\n"
-"lxc storage create [<remote>:]<pool> <driver> [key=value]...\n"
+"lxc storage create [<remote>:]<pool> <driver> [key=value]... [--target "
+"<node>]\n"
 "    Create a storage pool.\n"
 "\n"
 "lxc storage get [<remote>:]<pool> <key>\n"
@@ -2078,28 +2147,31 @@ msgid ""
 "lxc storage volume list [<remote>:]<pool>\n"
 "    List available storage volumes on a storage pool.\n"
 "\n"
-"lxc storage volume show [<remote>:]<pool> <volume>\n"
-"    Show details of a storage volume on a storage pool.\n"
+"lxc storage volume show [<remote>:]<pool> <volume> [--target <node>]\n"
+"   Show details of a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume create [<remote>:]<pool> <volume> [key=value]...\n"
+"lxc storage volume create [<remote>:]<pool> <volume> [key=value]... [--"
+"target <node>]\n"
 "    Create a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume rename [<remote>:]<pool> <old name> <new name>\n"
+"lxc storage volume rename [<remote>:]<pool> <old name> <new name> [--target "
+"<node>]\n"
 "    Rename a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume get [<remote>:]<pool> <volume> <key>\n"
+"lxc storage volume get [<remote>:]<pool> <volume> <key> [--target <node>]\n"
 "    Get storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume set [<remote>:]<pool> <volume> <key> <value>\n"
+"lxc storage volume set [<remote>:]<pool> <volume> <key> <value> [--target "
+"<node>]\n"
 "    Set storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume unset [<remote>:]<pool> <volume> <key>\n"
+"lxc storage volume unset [<remote>:]<pool> <volume> <key> [--target <node>]\n"
 "    Unset storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume delete [<remote>:]<pool> <volume>\n"
+"lxc storage volume delete [<remote>:]<pool> <volume> [--target <node>]\n"
 "    Delete a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume edit [<remote>:]<pool> <volume>\n"
+"lxc storage volume edit [<remote>:]<pool> <volume> [--target <node>]\n"
 "    Edit storage volume, either by launching external editor or reading "
 "STDIN.\n"
 "\n"
@@ -2149,7 +2221,7 @@ msgstr ""
 msgid "User aborted delete operation."
 msgstr ""
 
-#: lxc/utils.go:371
+#: lxc/utils.go:372
 msgid ""
 "User signaled us three times, exiting. The remote operation will keep "
 "running."
@@ -2169,7 +2241,7 @@ msgstr ""
 msgid "Whether or not to snapshot the container's running state"
 msgstr ""
 
-#: lxc/network.go:510 lxc/operation.go:141 lxc/remote.go:382 lxc/remote.go:387
+#: lxc/network.go:528 lxc/operation.go:141 lxc/remote.go:382 lxc/remote.go:387
 msgid "YES"
 msgstr ""
 
@@ -2201,11 +2273,11 @@ msgstr ""
 msgid "default"
 msgstr ""
 
-#: lxc/storage.go:804
+#: lxc/storage.go:835
 msgid "description"
 msgstr ""
 
-#: lxc/init.go:311
+#: lxc/init.go:314
 msgid "didn't get any affected image, container or snapshot from server"
 msgstr ""
 
@@ -2213,7 +2285,7 @@ msgstr ""
 msgid "disabled"
 msgstr ""
 
-#: lxc/storage.go:803
+#: lxc/storage.go:834
 msgid "driver"
 msgstr ""
 
@@ -2231,11 +2303,11 @@ msgstr ""
 msgid "error: unknown command: %s"
 msgstr ""
 
-#: lxc/storage.go:801
+#: lxc/storage.go:832
 msgid "info"
 msgstr ""
 
-#: lxc/storage.go:802
+#: lxc/storage.go:833
 msgid "name"
 msgstr ""
 
@@ -2247,7 +2319,7 @@ msgstr ""
 msgid "ok (y/n)?"
 msgstr ""
 
-#: lxc/main.go:366 lxc/main.go:370
+#: lxc/main.go:367 lxc/main.go:371
 #, c-format
 msgid "processing aliases failed %s\n"
 msgstr ""
@@ -2276,32 +2348,32 @@ msgstr ""
 msgid "remote %s is static and cannot be modified"
 msgstr ""
 
-#: lxc/storage.go:806
+#: lxc/storage.go:837
 msgid "space used"
 msgstr ""
 
-#: lxc/info.go:248
+#: lxc/info.go:251
 msgid "stateful"
 msgstr ""
 
-#: lxc/info.go:250
+#: lxc/info.go:253
 msgid "stateless"
 msgstr ""
 
-#: lxc/info.go:244
+#: lxc/info.go:247
 #, c-format
 msgid "taken at %s"
 msgstr ""
 
-#: lxc/storage.go:805
+#: lxc/storage.go:836
 msgid "total space"
 msgstr ""
 
-#: lxc/storage.go:800
+#: lxc/storage.go:831
 msgid "used by"
 msgstr ""
 
-#: lxc/main.go:297
+#: lxc/main.go:298
 msgid "wrong number of subcommand arguments"
 msgstr ""
 
diff --git a/po/zh_Hans.po b/po/zh_Hans.po
index 7fbfe611d..6e1f1a07e 100644
--- a/po/zh_Hans.po
+++ b/po/zh_Hans.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2018-01-21 21:48+0000\n"
+"POT-Creation-Date: 2018-01-24 13:34+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -16,7 +16,7 @@ msgstr ""
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
 
-#: lxc/storage.go:34
+#: lxc/storage.go:35
 msgid ""
 "### This is a yaml representation of a storage pool.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -33,7 +33,7 @@ msgid ""
 "###   zfs.pool_name: default"
 msgstr ""
 
-#: lxc/storage.go:51
+#: lxc/storage.go:52
 msgid ""
 "### This is a yaml representation of a storage volume.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -102,7 +102,7 @@ msgid ""
 "###  description: My custom image"
 msgstr ""
 
-#: lxc/network.go:30
+#: lxc/network.go:32
 msgid ""
 "### This is a yaml representation of the network.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -153,7 +153,7 @@ msgstr ""
 msgid "%s is not a directory"
 msgstr ""
 
-#: lxc/utils.go:375
+#: lxc/utils.go:376
 #, c-format
 msgid "%v (interrupt two more times to force)"
 msgstr ""
@@ -179,7 +179,7 @@ msgstr ""
 msgid "ARCH"
 msgstr ""
 
-#: lxc/list.go:463
+#: lxc/list.go:467
 msgid "ARCHITECTURE"
 msgstr ""
 
@@ -204,7 +204,7 @@ msgstr ""
 msgid "Aliases:"
 msgstr ""
 
-#: lxc/image.go:600 lxc/info.go:122
+#: lxc/image.go:600 lxc/info.go:125
 #, c-format
 msgid "Architecture: %s"
 msgstr ""
@@ -228,11 +228,11 @@ msgstr ""
 msgid "Both --all and container name given"
 msgstr ""
 
-#: lxc/info.go:215
+#: lxc/info.go:218
 msgid "Bytes received"
 msgstr ""
 
-#: lxc/info.go:216
+#: lxc/info.go:219
 msgid "Bytes sent"
 msgstr ""
 
@@ -244,11 +244,11 @@ msgstr ""
 msgid "COMMON NAME"
 msgstr ""
 
-#: lxc/info.go:179
+#: lxc/info.go:182
 msgid "CPU usage (in seconds)"
 msgstr ""
 
-#: lxc/info.go:183
+#: lxc/info.go:186
 msgid "CPU usage:"
 msgstr ""
 
@@ -256,7 +256,7 @@ msgstr ""
 msgid "CREATED"
 msgstr ""
 
-#: lxc/list.go:464
+#: lxc/list.go:468
 msgid "CREATED AT"
 msgstr ""
 
@@ -269,7 +269,7 @@ msgstr ""
 msgid "Can't pull a directory without --recursive"
 msgstr ""
 
-#: lxc/config.go:207 lxc/network.go:560
+#: lxc/config.go:207 lxc/network.go:587
 #, c-format
 msgid "Can't read from stdin: %s"
 msgstr ""
@@ -288,7 +288,7 @@ msgstr ""
 msgid "Can't unset key '%s', it's not currently set."
 msgstr ""
 
-#: lxc/profile.go:546 lxc/storage.go:663
+#: lxc/profile.go:546 lxc/storage.go:675
 msgid "Cannot provide container name to list"
 msgstr ""
 
@@ -301,7 +301,7 @@ msgstr ""
 msgid "Client certificate stored at server: "
 msgstr ""
 
-#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:130 lxc/list.go:131
+#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:134 lxc/list.go:135
 msgid "Columns"
 msgstr ""
 
@@ -309,12 +309,12 @@ msgstr ""
 msgid "Commands:"
 msgstr ""
 
-#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:137 lxc/init.go:138
+#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:138 lxc/init.go:139
 msgid "Config key/value to apply to the new container"
 msgstr ""
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:614 lxc/storage.go:1067
+#: lxc/network.go:440 lxc/profile.go:275 lxc/storage.go:626 lxc/storage.go:1138
 #, c-format
 msgid "Config parsing error: %s"
 msgstr ""
@@ -331,7 +331,7 @@ msgstr ""
 msgid "Container name is mandatory"
 msgstr ""
 
-#: lxc/copy.go:229 lxc/init.go:317
+#: lxc/copy.go:229 lxc/init.go:320
 #, c-format
 msgid "Container name is: %s"
 msgstr ""
@@ -366,26 +366,30 @@ msgstr ""
 msgid "Create any directories necessary"
 msgstr ""
 
-#: lxc/image.go:605 lxc/info.go:124
+#: lxc/image.go:605 lxc/info.go:127
 #, c-format
 msgid "Created: %s"
 msgstr ""
 
-#: lxc/init.go:196
+#: lxc/init.go:199
 #, c-format
 msgid "Creating %s"
 msgstr ""
 
-#: lxc/init.go:194
+#: lxc/init.go:197
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:465 lxc/network.go:525
-#: lxc/storage.go:692 lxc/storage.go:881
+#: lxc/cluster.go:184
+msgid "DATABASE"
+msgstr ""
+
+#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:469 lxc/network.go:547
+#: lxc/storage.go:711 lxc/storage.go:916
 msgid "DESCRIPTION"
 msgstr ""
 
-#: lxc/storage.go:693
+#: lxc/storage.go:712
 msgid "DRIVER"
 msgstr ""
 
@@ -403,7 +407,7 @@ msgstr ""
 msgid "Device %s removed from %s"
 msgstr ""
 
-#: lxc/utils.go:307 lxc/utils.go:331
+#: lxc/utils.go:308 lxc/utils.go:332
 #, c-format
 msgid "Device already exists: %s"
 msgstr ""
@@ -420,11 +424,11 @@ msgstr ""
 msgid "Disable stdin (reads from /dev/null)"
 msgstr ""
 
-#: lxc/info.go:172
+#: lxc/info.go:175
 msgid "Disk usage:"
 msgstr ""
 
-#: lxc/list.go:622
+#: lxc/list.go:638
 msgid "EPHEMERAL"
 msgstr ""
 
@@ -448,7 +452,7 @@ msgstr ""
 msgid "Environment:"
 msgstr ""
 
-#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:141 lxc/init.go:142
+#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:142 lxc/init.go:143
 msgid "Ephemeral container"
 msgstr ""
 
@@ -483,7 +487,7 @@ msgstr ""
 msgid "FINGERPRINT"
 msgstr ""
 
-#: lxc/utils.go:413
+#: lxc/utils.go:414
 #, c-format
 msgid "Failed to create alias %s"
 msgstr ""
@@ -502,7 +506,7 @@ msgstr ""
 msgid "Failed to get the new container name"
 msgstr ""
 
-#: lxc/utils.go:403
+#: lxc/utils.go:404
 #, c-format
 msgid "Failed to remove alias %s"
 msgstr ""
@@ -512,11 +516,11 @@ msgstr ""
 msgid "Failed to walk path for %s: %s"
 msgstr ""
 
-#: lxc/list.go:133
+#: lxc/list.go:137
 msgid "Fast mode (same as --columns=nsacPt)"
 msgstr ""
 
-#: lxc/network.go:486 lxc/operation.go:121
+#: lxc/network.go:504 lxc/operation.go:121
 msgid "Filtering isn't supported yet"
 msgstr ""
 
@@ -529,6 +533,10 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
+#: lxc/cluster.go:41
+msgid "Force removing a node, even if degraded"
+msgstr ""
+
 #: lxc/action.go:47 lxc/action.go:48
 msgid "Force the container to shutdown"
 msgstr ""
@@ -541,7 +549,7 @@ msgstr ""
 msgid "Force using the local unix socket"
 msgstr ""
 
-#: lxc/image.go:180 lxc/list.go:132
+#: lxc/image.go:180 lxc/list.go:136
 msgid "Format (csv|json|table|yaml)"
 msgstr ""
 
@@ -553,11 +561,11 @@ msgstr ""
 msgid "ID"
 msgstr ""
 
-#: lxc/list.go:461
+#: lxc/list.go:465
 msgid "IPV4"
 msgstr ""
 
-#: lxc/list.go:462
+#: lxc/list.go:466
 msgid "IPV6"
 msgstr ""
 
@@ -604,7 +612,7 @@ msgstr ""
 msgid "Input data"
 msgstr ""
 
-#: lxc/init.go:147
+#: lxc/init.go:148
 msgid "Instance type"
 msgstr ""
 
@@ -641,7 +649,7 @@ msgstr ""
 msgid "Invalid target %s"
 msgstr ""
 
-#: lxc/info.go:153
+#: lxc/info.go:156
 msgid "Ips:"
 msgstr ""
 
@@ -649,7 +657,7 @@ msgstr ""
 msgid "Keep the image up to date after initial copy"
 msgstr ""
 
-#: lxc/list.go:466
+#: lxc/list.go:470
 msgid "LAST USED AT"
 msgstr ""
 
@@ -666,14 +674,18 @@ msgstr ""
 msgid "Last used: never"
 msgstr ""
 
-#: lxc/info.go:268
+#: lxc/info.go:271
 msgid "Log:"
 msgstr ""
 
-#: lxc/network.go:524
+#: lxc/network.go:546
 msgid "MANAGED"
 msgstr ""
 
+#: lxc/cluster.go:186
+msgid "MESSAGE"
+msgstr ""
+
 #: lxc/image.go:176
 msgid "Make image public"
 msgstr ""
@@ -682,23 +694,23 @@ msgstr ""
 msgid "Make the image public"
 msgstr ""
 
-#: lxc/info.go:190
+#: lxc/info.go:193
 msgid "Memory (current)"
 msgstr ""
 
-#: lxc/info.go:194
+#: lxc/info.go:197
 msgid "Memory (peak)"
 msgstr ""
 
-#: lxc/info.go:206
+#: lxc/info.go:209
 msgid "Memory usage:"
 msgstr ""
 
-#: lxc/utils.go:258
+#: lxc/utils.go:259
 msgid "Missing summary."
 msgstr ""
 
-#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:412 lxc/storage.go:532
+#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:414 lxc/storage.go:544
 msgid "More than one device matches, specify the device name."
 msgstr ""
 
@@ -718,40 +730,49 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/list.go:467 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409
-#: lxc/storage.go:691 lxc/storage.go:880
+#: lxc/cluster.go:182 lxc/list.go:471 lxc/network.go:544 lxc/profile.go:573
+#: lxc/remote.go:409 lxc/storage.go:710 lxc/storage.go:915
 msgid "NAME"
 msgstr ""
 
-#: lxc/network.go:508 lxc/operation.go:139 lxc/remote.go:380 lxc/remote.go:385
+#: lxc/network.go:526 lxc/operation.go:139 lxc/remote.go:380 lxc/remote.go:385
 msgid "NO"
 msgstr ""
 
+#: lxc/list.go:492 lxc/storage.go:920
+msgid "NODE"
+msgstr ""
+
 #: lxc/info.go:117
 #, c-format
 msgid "Name: %s"
 msgstr ""
 
-#: lxc/network.go:258
+#: lxc/network.go:271
 #, c-format
 msgid "Network %s created"
 msgstr ""
 
-#: lxc/network.go:374
+#: lxc/network.go:388
 #, c-format
 msgid "Network %s deleted"
 msgstr ""
 
-#: lxc/network.go:451
+#: lxc/network.go:269
+#, c-format
+msgid "Network %s pending on node %s"
+msgstr ""
+
+#: lxc/network.go:465
 #, c-format
 msgid "Network %s renamed to %s"
 msgstr ""
 
-#: lxc/init.go:143 lxc/init.go:144
+#: lxc/init.go:144 lxc/init.go:145
 msgid "Network name"
 msgstr ""
 
-#: lxc/info.go:223
+#: lxc/info.go:226
 msgid "Network usage:"
 msgstr ""
 
@@ -763,11 +784,11 @@ msgstr ""
 msgid "No certificate provided to add"
 msgstr ""
 
-#: lxc/network.go:293 lxc/network.go:346
+#: lxc/network.go:307 lxc/network.go:360
 msgid "No device found for this network"
 msgstr ""
 
-#: lxc/storage.go:421 lxc/storage.go:541
+#: lxc/storage.go:423 lxc/storage.go:553
 msgid "No device found for this storage volume."
 msgstr ""
 
@@ -775,7 +796,26 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/storage.go:365 lxc/storage.go:458
+#: lxc/cluster.go:142
+#, c-format
+msgid "Node %s removed"
+msgstr ""
+
+#: lxc/cluster.go:118
+#, c-format
+msgid "Node %s renamed to %s"
+msgstr ""
+
+#: lxc/init.go:149 lxc/network.go:102 lxc/storage.go:158
+msgid "Node name"
+msgstr ""
+
+#: lxc/info.go:119
+#, c-format
+msgid "Node: %s"
+msgstr ""
+
+#: lxc/storage.go:367 lxc/storage.go:460
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
 
@@ -787,7 +827,7 @@ msgstr ""
 msgid "Only https:// is supported for remote image import."
 msgstr ""
 
-#: lxc/network.go:402 lxc/network.go:546
+#: lxc/network.go:416 lxc/network.go:573
 msgid "Only managed networks can be modified."
 msgstr ""
 
@@ -804,19 +844,19 @@ msgstr ""
 msgid "Override the terminal mode (auto, interactive or non-interactive)"
 msgstr ""
 
-#: lxc/list.go:624
+#: lxc/list.go:640
 msgid "PERSISTENT"
 msgstr ""
 
-#: lxc/list.go:469
+#: lxc/list.go:473
 msgid "PID"
 msgstr ""
 
-#: lxc/list.go:468
+#: lxc/list.go:472
 msgid "PROCESSES"
 msgstr ""
 
-#: lxc/list.go:470
+#: lxc/list.go:474
 msgid "PROFILES"
 msgstr ""
 
@@ -828,11 +868,11 @@ msgstr ""
 msgid "PUBLIC"
 msgstr ""
 
-#: lxc/info.go:217
+#: lxc/info.go:220
 msgid "Packets received"
 msgstr ""
 
-#: lxc/info.go:218
+#: lxc/info.go:221
 msgid "Packets sent"
 msgstr ""
 
@@ -844,7 +884,7 @@ msgstr ""
 msgid "Path to an alternate server directory"
 msgstr ""
 
-#: lxc/main.go:238
+#: lxc/main.go:239
 msgid "Pause containers."
 msgstr ""
 
@@ -852,12 +892,12 @@ msgstr ""
 msgid "Permission denied, are you in the lxd group?"
 msgstr ""
 
-#: lxc/info.go:135
+#: lxc/info.go:138
 #, c-format
 msgid "Pid: %d"
 msgstr ""
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:615 lxc/storage.go:1068
+#: lxc/network.go:441 lxc/profile.go:276 lxc/storage.go:627 lxc/storage.go:1139
 msgid "Press enter to open the editor again"
 msgstr ""
 
@@ -882,7 +922,7 @@ msgstr ""
 msgid "Print verbose information"
 msgstr ""
 
-#: lxc/info.go:159
+#: lxc/info.go:162
 #, c-format
 msgid "Processes: %d"
 msgstr ""
@@ -912,7 +952,7 @@ msgstr ""
 msgid "Profile %s renamed to %s"
 msgstr ""
 
-#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:139 lxc/init.go:140
+#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:140 lxc/init.go:141
 msgid "Profile to apply to the new container"
 msgstr ""
 
@@ -921,7 +961,7 @@ msgstr ""
 msgid "Profiles %s applied to %s"
 msgstr ""
 
-#: lxc/info.go:133
+#: lxc/info.go:136
 #, c-format
 msgid "Profiles: %s"
 msgstr ""
@@ -952,11 +992,11 @@ msgstr ""
 msgid "Remote admin password"
 msgstr ""
 
-#: lxc/utils.go:366
+#: lxc/utils.go:367
 msgid "Remote operation canceled by user"
 msgstr ""
 
-#: lxc/info.go:119
+#: lxc/info.go:122
 #, c-format
 msgid "Remote: %s"
 msgstr ""
@@ -966,7 +1006,7 @@ msgstr ""
 msgid "Remove %s (yes/no): "
 msgstr ""
 
-#: lxc/storage.go:1099
+#: lxc/storage.go:1176
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr ""
@@ -975,11 +1015,11 @@ msgstr ""
 msgid "Require user confirmation"
 msgstr ""
 
-#: lxc/info.go:156
+#: lxc/info.go:159
 msgid "Resources:"
 msgstr ""
 
-#: lxc/main.go:246
+#: lxc/main.go:247
 msgid "Restart containers."
 msgstr ""
 
@@ -987,7 +1027,7 @@ msgstr ""
 msgid "Retrieve the container's console log"
 msgstr ""
 
-#: lxc/init.go:289
+#: lxc/init.go:292
 #, c-format
 msgid "Retrieving image: %s"
 msgstr ""
@@ -1000,15 +1040,15 @@ msgstr ""
 msgid "SIZE"
 msgstr ""
 
-#: lxc/list.go:471
+#: lxc/list.go:475
 msgid "SNAPSHOTS"
 msgstr ""
 
-#: lxc/storage.go:694
+#: lxc/storage.go:717
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/list.go:472
+#: lxc/cluster.go:185 lxc/list.go:476 lxc/network.go:551 lxc/storage.go:715
 msgid "STATE"
 msgstr ""
 
@@ -1020,7 +1060,7 @@ msgstr ""
 msgid "STATUS"
 msgstr ""
 
-#: lxc/list.go:474
+#: lxc/list.go:478
 msgid "STORAGE POOL"
 msgstr ""
 
@@ -1072,11 +1112,11 @@ msgstr ""
 msgid "Show the resources available to the server"
 msgstr ""
 
-#: lxc/storage.go:155
+#: lxc/storage.go:156
 msgid "Show the resources available to the storage pool"
 msgstr ""
 
-#: lxc/storage.go:156
+#: lxc/storage.go:157
 msgid "Show the used and free space in bytes"
 msgstr ""
 
@@ -1085,7 +1125,7 @@ msgstr ""
 msgid "Size: %.2fMB"
 msgstr ""
 
-#: lxc/info.go:237
+#: lxc/info.go:240
 msgid "Snapshots:"
 msgstr ""
 
@@ -1098,7 +1138,7 @@ msgstr ""
 msgid "Source:"
 msgstr ""
 
-#: lxc/main.go:256
+#: lxc/main.go:257
 msgid "Start containers."
 msgstr ""
 
@@ -1107,12 +1147,12 @@ msgstr ""
 msgid "Starting %s"
 msgstr ""
 
-#: lxc/info.go:127
+#: lxc/info.go:130
 #, c-format
 msgid "Status: %s"
 msgstr ""
 
-#: lxc/main.go:262
+#: lxc/main.go:263
 msgid "Stop containers."
 msgstr ""
 
@@ -1129,26 +1169,31 @@ msgstr ""
 msgid "Stopping the container failed: %s"
 msgstr ""
 
-#: lxc/storage.go:506
+#: lxc/storage.go:517
 #, c-format
 msgid "Storage pool %s created"
 msgstr ""
 
-#: lxc/storage.go:565
+#: lxc/storage.go:577
 #, c-format
 msgid "Storage pool %s deleted"
 msgstr ""
 
-#: lxc/init.go:145 lxc/init.go:146
+#: lxc/storage.go:515
+#, c-format
+msgid "Storage pool %s pending on node %s"
+msgstr ""
+
+#: lxc/init.go:146 lxc/init.go:147
 msgid "Storage pool name"
 msgstr ""
 
-#: lxc/storage.go:914
+#: lxc/storage.go:959
 #, c-format
 msgid "Storage volume %s created"
 msgstr ""
 
-#: lxc/storage.go:929
+#: lxc/storage.go:979
 #, c-format
 msgid "Storage volume %s deleted"
 msgstr ""
@@ -1157,15 +1202,15 @@ msgstr ""
 msgid "Store the container state (only for stop)"
 msgstr ""
 
-#: lxc/info.go:198
+#: lxc/info.go:201
 msgid "Swap (current)"
 msgstr ""
 
-#: lxc/info.go:202
+#: lxc/info.go:205
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:473 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:879
+#: lxc/list.go:477 lxc/network.go:545 lxc/operation.go:153 lxc/storage.go:914
 msgid "TYPE"
 msgstr ""
 
@@ -1179,7 +1224,7 @@ msgid ""
 "restarted."
 msgstr ""
 
-#: lxc/init.go:362
+#: lxc/init.go:365
 msgid "The container you are starting doesn't have any network attached to it."
 msgstr ""
 
@@ -1192,7 +1237,7 @@ msgstr ""
 msgid "The device doesn't exist"
 msgstr ""
 
-#: lxc/init.go:346
+#: lxc/init.go:349
 #, c-format
 msgid "The local image '%s' couldn't be found, trying '%s:' instead."
 msgstr ""
@@ -1201,11 +1246,11 @@ msgstr ""
 msgid "The opposite of \"lxc pause\" is \"lxc start\"."
 msgstr ""
 
-#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:426 lxc/storage.go:546
+#: lxc/network.go:312 lxc/network.go:365 lxc/storage.go:428 lxc/storage.go:558
 msgid "The specified device doesn't exist"
 msgstr ""
 
-#: lxc/network.go:302 lxc/network.go:355
+#: lxc/network.go:316 lxc/network.go:369
 msgid "The specified device doesn't match the network"
 msgstr ""
 
@@ -1229,11 +1274,11 @@ msgstr ""
 msgid "Timestamps:"
 msgstr ""
 
-#: lxc/init.go:364
+#: lxc/init.go:367
 msgid "To attach a network to a container, use: lxc network attach"
 msgstr ""
 
-#: lxc/init.go:363
+#: lxc/init.go:366
 msgid "To create a new network, use: lxc network create"
 msgstr ""
 
@@ -1264,11 +1309,11 @@ msgstr ""
 msgid "Try `lxc info --show-log %s` for more info"
 msgstr ""
 
-#: lxc/info.go:129
+#: lxc/info.go:132
 msgid "Type: ephemeral"
 msgstr ""
 
-#: lxc/info.go:131
+#: lxc/info.go:134
 msgid "Type: persistent"
 msgstr ""
 
@@ -1276,11 +1321,11 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/remote.go:410
+#: lxc/cluster.go:183 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:695 lxc/storage.go:882
+#: lxc/network.go:548 lxc/profile.go:574 lxc/storage.go:719 lxc/storage.go:917
 msgid "USED BY"
 msgstr ""
 
@@ -1310,6 +1355,25 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
+#: lxc/cluster.go:22
+msgid ""
+"Usage: lxc cluster <subcommand> [options]\n"
+"\n"
+"Manage cluster nodes.\n"
+"\n"
+"lxc cluster list [<remote>:]\n"
+"    List all nodes in the cluster.\n"
+"\n"
+"lxc cluster show [<remote>:]<node>\n"
+"    Show details of a node.\n"
+"\n"
+"lxc cluster rename [<remote>:]<node> <new-name>\n"
+"    Rename a cluster node.\n"
+"\n"
+"lxc cluster delete [<remote>:]<node> [--force]\n"
+"    Delete a node from the cluster."
+msgstr ""
+
 #: lxc/config.go:85
 msgid ""
 "Usage: lxc config <subcommand> [options]\n"
@@ -1626,11 +1690,12 @@ msgid ""
 "    For LXD server information."
 msgstr ""
 
-#: lxc/init.go:77
+#: lxc/init.go:78
 msgid ""
 "Usage: lxc init [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create containers from images.\n"
 "\n"
@@ -1645,7 +1710,8 @@ msgstr ""
 msgid ""
 "Usage: lxc launch [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create and start containers from images.\n"
 "\n"
@@ -1726,6 +1792,8 @@ msgid ""
 "\n"
 "\tt - Type (persistent or ephemeral)\n"
 "\n"
+"\tH - Node hosting the container\n"
+"\n"
 "Custom columns are defined with \"key[:name][:maxWidth]\":\n"
 "\n"
 "\tKEY: The (extended) config key to display\n"
@@ -1791,7 +1859,7 @@ msgid ""
 "    Rename a snapshot."
 msgstr ""
 
-#: lxc/network.go:50
+#: lxc/network.go:52
 msgid ""
 "Usage: lxc network <subcommand> [options]\n"
 "\n"
@@ -1800,13 +1868,13 @@ msgid ""
 "lxc network list [<remote>:]\n"
 "    List available networks.\n"
 "\n"
-"lxc network show [<remote>:]<network>\n"
+"lxc network show [<remote>:]<network> [--target <node>]\n"
 "    Show details of a network.\n"
 "\n"
-"lxc network create [<remote>:]<network> [key=value...]\n"
+"lxc network create [<remote>:]<network> [key=value...] [--target <node>]\n"
 "    Create a network.\n"
 "\n"
-"lxc network get [<remote>:]<network> <key>\n"
+"lxc network get [<remote>:]<network> <key> [--target <node>]\n"
 "    Get network configuration.\n"
 "\n"
 "lxc network set [<remote>:]<network> <key> <value>\n"
@@ -2039,7 +2107,7 @@ msgid ""
 "    Create a snapshot of \"u1\" called \"snap0\"."
 msgstr ""
 
-#: lxc/storage.go:65
+#: lxc/storage.go:66
 msgid ""
 "Usage: lxc storage <subcommand> [options]\n"
 "\n"
@@ -2049,13 +2117,14 @@ msgid ""
 "lxc storage list [<remote>:]\n"
 "    List available storage pools.\n"
 "\n"
-"lxc storage show [<remote>:]<pool> [--resources]\n"
+"lxc storage show [<remote>:]<pool> [--resources] [--target <node>]\n"
 "    Show details of a storage pool.\n"
 "\n"
 "lxc storage info [<remote>:]<pool> [--bytes]\n"
 "    Show information of a storage pool in yaml format.\n"
 "\n"
-"lxc storage create [<remote>:]<pool> <driver> [key=value]...\n"
+"lxc storage create [<remote>:]<pool> <driver> [key=value]... [--target "
+"<node>]\n"
 "    Create a storage pool.\n"
 "\n"
 "lxc storage get [<remote>:]<pool> <key>\n"
@@ -2078,28 +2147,31 @@ msgid ""
 "lxc storage volume list [<remote>:]<pool>\n"
 "    List available storage volumes on a storage pool.\n"
 "\n"
-"lxc storage volume show [<remote>:]<pool> <volume>\n"
-"    Show details of a storage volume on a storage pool.\n"
+"lxc storage volume show [<remote>:]<pool> <volume> [--target <node>]\n"
+"   Show details of a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume create [<remote>:]<pool> <volume> [key=value]...\n"
+"lxc storage volume create [<remote>:]<pool> <volume> [key=value]... [--"
+"target <node>]\n"
 "    Create a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume rename [<remote>:]<pool> <old name> <new name>\n"
+"lxc storage volume rename [<remote>:]<pool> <old name> <new name> [--target "
+"<node>]\n"
 "    Rename a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume get [<remote>:]<pool> <volume> <key>\n"
+"lxc storage volume get [<remote>:]<pool> <volume> <key> [--target <node>]\n"
 "    Get storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume set [<remote>:]<pool> <volume> <key> <value>\n"
+"lxc storage volume set [<remote>:]<pool> <volume> <key> <value> [--target "
+"<node>]\n"
 "    Set storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume unset [<remote>:]<pool> <volume> <key>\n"
+"lxc storage volume unset [<remote>:]<pool> <volume> <key> [--target <node>]\n"
 "    Unset storage volume configuration on a storage pool.\n"
 "\n"
-"lxc storage volume delete [<remote>:]<pool> <volume>\n"
+"lxc storage volume delete [<remote>:]<pool> <volume> [--target <node>]\n"
 "    Delete a storage volume on a storage pool.\n"
 "\n"
-"lxc storage volume edit [<remote>:]<pool> <volume>\n"
+"lxc storage volume edit [<remote>:]<pool> <volume> [--target <node>]\n"
 "    Edit storage volume, either by launching external editor or reading "
 "STDIN.\n"
 "\n"
@@ -2149,7 +2221,7 @@ msgstr ""
 msgid "User aborted delete operation."
 msgstr ""
 
-#: lxc/utils.go:371
+#: lxc/utils.go:372
 msgid ""
 "User signaled us three times, exiting. The remote operation will keep "
 "running."
@@ -2169,7 +2241,7 @@ msgstr ""
 msgid "Whether or not to snapshot the container's running state"
 msgstr ""
 
-#: lxc/network.go:510 lxc/operation.go:141 lxc/remote.go:382 lxc/remote.go:387
+#: lxc/network.go:528 lxc/operation.go:141 lxc/remote.go:382 lxc/remote.go:387
 msgid "YES"
 msgstr ""
 
@@ -2201,11 +2273,11 @@ msgstr ""
 msgid "default"
 msgstr ""
 
-#: lxc/storage.go:804
+#: lxc/storage.go:835
 msgid "description"
 msgstr ""
 
-#: lxc/init.go:311
+#: lxc/init.go:314
 msgid "didn't get any affected image, container or snapshot from server"
 msgstr ""
 
@@ -2213,7 +2285,7 @@ msgstr ""
 msgid "disabled"
 msgstr ""
 
-#: lxc/storage.go:803
+#: lxc/storage.go:834
 msgid "driver"
 msgstr ""
 
@@ -2231,11 +2303,11 @@ msgstr ""
 msgid "error: unknown command: %s"
 msgstr ""
 
-#: lxc/storage.go:801
+#: lxc/storage.go:832
 msgid "info"
 msgstr ""
 
-#: lxc/storage.go:802
+#: lxc/storage.go:833
 msgid "name"
 msgstr ""
 
@@ -2247,7 +2319,7 @@ msgstr ""
 msgid "ok (y/n)?"
 msgstr ""
 
-#: lxc/main.go:366 lxc/main.go:370
+#: lxc/main.go:367 lxc/main.go:371
 #, c-format
 msgid "processing aliases failed %s\n"
 msgstr ""
@@ -2276,32 +2348,32 @@ msgstr ""
 msgid "remote %s is static and cannot be modified"
 msgstr ""
 
-#: lxc/storage.go:806
+#: lxc/storage.go:837
 msgid "space used"
 msgstr ""
 
-#: lxc/info.go:248
+#: lxc/info.go:251
 msgid "stateful"
 msgstr ""
 
-#: lxc/info.go:250
+#: lxc/info.go:253
 msgid "stateless"
 msgstr ""
 
-#: lxc/info.go:244
+#: lxc/info.go:247
 #, c-format
 msgid "taken at %s"
 msgstr ""
 
-#: lxc/storage.go:805
+#: lxc/storage.go:836
 msgid "total space"
 msgstr ""
 
-#: lxc/storage.go:800
+#: lxc/storage.go:831
 msgid "used by"
 msgstr ""
 
-#: lxc/main.go:297
+#: lxc/main.go:298
 msgid "wrong number of subcommand arguments"
 msgstr ""
 

From 5e8a09625365bfe48ed04f068d3e8cd298912745 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 24 Jan 2018 14:28:08 +0000
Subject: [PATCH 219/227] Support lxc console on remote nodes

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/container_console.go    | 51 ++++++++++++++++++++++++++++++++++-----------
 lxd/container_exec.go       | 18 +++++++---------
 test/includes/clustering.sh |  1 -
 3 files changed, 46 insertions(+), 24 deletions(-)
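
The pattern is the same for both endpoints: check whether the container
lives on another cluster node, and if so replay the POST against that
node and proxy the resulting operation back to the caller. A minimal
sketch of the shared logic, for reviewers (forwardIfRemote is a
hypothetical name; every helper it calls is one used in the diff below):

// Sketch only, not part of the patch: the forward-if-remote logic that
// containerConsolePost and containerExecPost now both inline.
func forwardIfRemote(d *Daemon, name string, endpoint string, body interface{}) Response {
	cert := d.endpoints.NetworkCert()

	// Returns a nil client when the container is hosted locally.
	client, err := cluster.ConnectIfContainerIsRemote(d.cluster, name, cert)
	if err != nil {
		return SmartError(err)
	}
	if client == nil {
		return nil // Local container, handle the request in this daemon.
	}

	// Replay the request on the remote node and forward the resulting
	// operation back to the original caller.
	url := fmt.Sprintf("/containers/%s/%s", name, endpoint)
	op, _, err := client.RawOperation("POST", url, body, "")
	if err != nil {
		return SmartError(err)
	}
	return ForwardedOperationResponse(&op.Operation)
}

For plain responses rather than operations (the console log GET below),
the existing ForwardedResponseIfContainerIsRemote helper is used instead.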

diff --git a/lxd/container_console.go b/lxd/container_console.go
index 5e11e3255..5875396b6 100644
--- a/lxd/container_console.go
+++ b/lxd/container_console.go
@@ -15,6 +15,7 @@ import (
 	"github.com/gorilla/websocket"
 	"gopkg.in/lxc/go-lxc.v2"
 
+	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
@@ -252,29 +253,45 @@ func (s *consoleWs) Do(op *operation) error {
 
 func containerConsolePost(d *Daemon, r *http.Request) Response {
 	name := mux.Vars(r)["name"]
-	c, err := containerLoadByName(d.State(), name)
+
+	post := api.ContainerConsolePost{}
+	buf, err := ioutil.ReadAll(r.Body)
 	if err != nil {
-		return SmartError(err)
+		return BadRequest(err)
 	}
 
-	err = fmt.Errorf("Container is not running")
-	if !c.IsRunning() {
+	err = json.Unmarshal(buf, &post)
+	if err != nil {
 		return BadRequest(err)
 	}
 
-	err = fmt.Errorf("Container is frozen")
-	if c.IsFrozen() {
-		return BadRequest(err)
+	// Forward the request if the container is remote.
+	cert := d.endpoints.NetworkCert()
+	client, err := cluster.ConnectIfContainerIsRemote(d.cluster, name, cert)
+	if err != nil {
+		return SmartError(err)
+	}
+	if client != nil {
+		url := fmt.Sprintf("/containers/%s/console", name)
+		op, _, err := client.RawOperation("POST", url, post, "")
+		if err != nil {
+			return SmartError(err)
+		}
+		return ForwardedOperationResponse(&op.Operation)
 	}
 
-	post := api.ContainerConsolePost{}
-	buf, err := ioutil.ReadAll(r.Body)
+	c, err := containerLoadByName(d.State(), name)
 	if err != nil {
+		return SmartError(err)
+	}
+
+	err = fmt.Errorf("Container is not running")
+	if !c.IsRunning() {
 		return BadRequest(err)
 	}
 
-	err = json.Unmarshal(buf, &post)
-	if err != nil {
+	err = fmt.Errorf("Container is frozen")
+	if c.IsFrozen() {
 		return BadRequest(err)
 	}
 
@@ -320,11 +337,21 @@ func containerConsolePost(d *Daemon, r *http.Request) Response {
 }
 
 func containerConsoleLogGet(d *Daemon, r *http.Request) Response {
+	name := mux.Vars(r)["name"]
+
+	// Forward the request if the container is remote.
+	response, err := ForwardedResponseIfContainerIsRemote(d, r, name)
+	if err != nil {
+		return SmartError(err)
+	}
+	if response != nil {
+		return response
+	}
+
 	if !util.RuntimeLiblxcVersionAtLeast(3, 0, 0) {
 		return BadRequest(fmt.Errorf("Querying the console buffer requires liblxc >= 3.0"))
 	}
 
-	name := mux.Vars(r)["name"]
 	c, err := containerLoadByName(d.State(), name)
 	if err != nil {
 		return SmartError(err)
diff --git a/lxd/container_exec.go b/lxd/container_exec.go
index 866398419..6be7ed092 100644
--- a/lxd/container_exec.go
+++ b/lxd/container_exec.go
@@ -16,7 +16,6 @@ import (
 	"github.com/gorilla/mux"
 	"github.com/gorilla/websocket"
 
-	lxd "github.com/lxc/lxd/client"
 	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
@@ -344,13 +343,19 @@ func containerExecPost(d *Daemon, r *http.Request) Response {
 		return BadRequest(err)
 	}
 
+	// Forward the request if the container is remote.
 	cert := d.endpoints.NetworkCert()
 	client, err := cluster.ConnectIfContainerIsRemote(d.cluster, name, cert)
 	if err != nil {
 		return SmartError(err)
 	}
 	if client != nil {
-		return containerExecPostCluster(client, name, post)
+		url := fmt.Sprintf("/containers/%s/exec", name)
+		op, _, err := client.RawOperation("POST", url, post, "")
+		if err != nil {
+			return SmartError(err)
+		}
+		return ForwardedOperationResponse(&op.Operation)
 	}
 
 	c, err := containerLoadByName(d.State(), name)
@@ -506,12 +511,3 @@ func containerExecPost(d *Daemon, r *http.Request) Response {
 
 	return OperationResponse(op)
 }
-
-// Perform an exec request for a container running on a different cluster node.
-func containerExecPostCluster(client lxd.ContainerServer, name string, req api.ContainerExecPost) Response {
-	op, _, err := client.RawOperation("POST", fmt.Sprintf("/containers/%s/exec", name), req, "")
-	if err != nil {
-		return SmartError(err)
-	}
-	return ForwardedOperationResponse(&op.Operation)
-}
diff --git a/test/includes/clustering.sh b/test/includes/clustering.sh
index f2502b6e4..55ad3e810 100644
--- a/test/includes/clustering.sh
+++ b/test/includes/clustering.sh
@@ -97,7 +97,6 @@ lxc.hook.autodev = mknod /dev/loop4 c 7 4
 lxc.hook.autodev = mknod /dev/loop5 c 7 5
 lxc.hook.autodev = mknod /dev/loop6 c 7 6
 lxc.hook.autodev = mknod /dev/loop7 c 7 7
-lxc.hook.autodev = mknod /dev/console c 5 1
 EOF
   lxc-execute -n "${ns}" --rcfile "${rcfile}" -- sh -c 'while true; do sleep 1; done' &
   sleep 1

From 3db48aecb1285f00cf9e40f33d39477b625543b5 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 24 Jan 2018 14:47:31 +0000
Subject: [PATCH 220/227] Fix lxc config in clustering integration tests

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 test/includes/clustering.sh | 1 +
 1 file changed, 1 insertion(+)
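
Read together with the previous patch, which dropped the mknod
/dev/console autodev hook from the same rcfile, the nested test
containers now run with the console disabled entirely. A standalone
illustration of the idea (the file path and container name here are
made up):

# Run a long-lived process in an LXC-managed namespace without a
# console device, mirroring what the clustering test harness does.
cat > /tmp/rcfile <<EOF
lxc.console.path=none
EOF
lxc-execute -n testns --rcfile /tmp/rcfile -- sh -c 'while true; do sleep 1; done' &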

diff --git a/test/includes/clustering.sh b/test/includes/clustering.sh
index 55ad3e810..58e8d8069 100644
--- a/test/includes/clustering.sh
+++ b/test/includes/clustering.sh
@@ -37,6 +37,7 @@ setup_clustering_netns() {
   echo "==> Setup clustering netns ${ns}"
 
   cat > "${rcfile}" <<EOF
+lxc.console.path=none
 lxc.mount.entry = cgroup                 sys/fs/cgroup                  tmpfs   rw,nosuid,nodev,noexec,mode=755,create=dir                                   0 0
 lxc.mount.entry = cgroup2                sys/fs/cgroup/unified          cgroup2 rw,nosuid,nodev,noexec,relatime,create=dir                                   0 0
 lxc.mount.entry = name=systemd           sys/fs/cgroup/systemd          cgroup  rw,nosuid,nodev,noexec,relatime,xattr,clone_children,name=systemd,create=dir 0 0

From 9390a1f01505a00a11710ea4421a2ee6e0dd3c34 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 25 Jan 2018 11:24:36 +0000
Subject: [PATCH 221/227] Use CanonicalLtd/go-sqlite3 instead of
 mattn/go-sqlite3

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/daemon.go                    | 1 -
 lxd/db/devices.go                | 2 --
 lxd/db/images.go                 | 2 --
 lxd/db/networks.go               | 2 --
 lxd/db/node/sqlite.go            | 2 +-
 lxd/db/profiles.go               | 2 --
 lxd/db/query/retry.go            | 2 +-
 lxd/db/query/slices_test.go      | 1 -
 lxd/db/query/transaction_test.go | 1 -
 lxd/db/schema/schema_test.go     | 1 -
 lxd/db/schema/update.go          | 2 +-
 lxd/db/storage_pools.go          | 1 -
 lxd/db/testing.go                | 2 +-
 lxd/devices.go                   | 2 --
 lxd/main_activateifneeded.go     | 9 ++++-----
 lxd/profiles.go                  | 1 -
 lxd/response.go                  | 2 +-
 17 files changed, 9 insertions(+), 26 deletions(-)
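
The CanonicalLtd fork bundles the pragma helpers that previously lived
in the separate sqlite3x package, which is why main_activateifneeded.go
below can drop one of its two sqlite imports. A sketch of how those
helpers fit into a connection hook (assuming the fork keeps mattn's
SQLiteDriver/ConnectHook API, and with a made-up driver name):

package main

import (
	"database/sql"

	"github.com/CanonicalLtd/go-sqlite3"
)

func init() {
	// Register a driver variant whose connections are prepared for
	// raft-based replication, using the pragma helpers that the fork
	// exposes directly.
	sql.Register("sqlite3_replicated", &sqlite3.SQLiteDriver{
		ConnectHook: func(conn *sqlite3.SQLiteConn) error {
			// WAL journaling is a requirement for replication.
			err := sqlite3.JournalModePragma(conn, sqlite3.JournalWal)
			if err != nil {
				return err
			}
			// Leave the WAL alone: replication must stay in full
			// control of the file, so never truncate it or
			// checkpoint it on close.
			err = sqlite3.JournalSizeLimitPragma(conn, -1)
			if err != nil {
				return err
			}
			return sqlite3.DatabaseNoCheckpointOnClose(conn)
		},
	})
}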

diff --git a/lxd/daemon.go b/lxd/daemon.go
index 3d8059640..5c1edb5d2 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -19,7 +19,6 @@ import (
 
 	"github.com/gorilla/mux"
 	"github.com/juju/idmclient"
-	_ "github.com/mattn/go-sqlite3"
 	"github.com/pkg/errors"
 	"golang.org/x/net/context"
 	"gopkg.in/macaroon-bakery.v2/bakery"
diff --git a/lxd/db/devices.go b/lxd/db/devices.go
index 94c0b4f69..4f0b4cfa3 100644
--- a/lxd/db/devices.go
+++ b/lxd/db/devices.go
@@ -4,8 +4,6 @@ import (
 	"database/sql"
 	"fmt"
 
-	_ "github.com/mattn/go-sqlite3"
-
 	"github.com/lxc/lxd/lxd/types"
 )
 
diff --git a/lxd/db/images.go b/lxd/db/images.go
index 65dc1978b..6fe2e7e43 100644
--- a/lxd/db/images.go
+++ b/lxd/db/images.go
@@ -5,8 +5,6 @@ import (
 	"fmt"
 	"time"
 
-	_ "github.com/mattn/go-sqlite3"
-
 	"github.com/lxc/lxd/lxd/db/query"
 	"github.com/lxc/lxd/shared/api"
 	"github.com/lxc/lxd/shared/osarch"
diff --git a/lxd/db/networks.go b/lxd/db/networks.go
index 4164cdea7..830e20336 100644
--- a/lxd/db/networks.go
+++ b/lxd/db/networks.go
@@ -5,8 +5,6 @@ import (
 	"fmt"
 	"strings"
 
-	_ "github.com/mattn/go-sqlite3"
-
 	"github.com/lxc/lxd/lxd/db/query"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
diff --git a/lxd/db/node/sqlite.go b/lxd/db/node/sqlite.go
index eecef80ab..235b8a776 100644
--- a/lxd/db/node/sqlite.go
+++ b/lxd/db/node/sqlite.go
@@ -4,7 +4,7 @@ import (
 	"database/sql"
 	"fmt"
 
-	"github.com/mattn/go-sqlite3"
+	"github.com/CanonicalLtd/go-sqlite3"
 )
 
 func init() {
diff --git a/lxd/db/profiles.go b/lxd/db/profiles.go
index bddfb317c..8c95e4a0b 100644
--- a/lxd/db/profiles.go
+++ b/lxd/db/profiles.go
@@ -4,8 +4,6 @@ import (
 	"database/sql"
 	"fmt"
 
-	_ "github.com/mattn/go-sqlite3"
-
 	"github.com/lxc/lxd/lxd/types"
 	"github.com/lxc/lxd/shared/api"
 )
diff --git a/lxd/db/query/retry.go b/lxd/db/query/retry.go
index 6209ab541..2ebe47fa0 100644
--- a/lxd/db/query/retry.go
+++ b/lxd/db/query/retry.go
@@ -4,8 +4,8 @@ import (
 	"strings"
 	"time"
 
+	"github.com/CanonicalLtd/go-sqlite3"
 	"github.com/lxc/lxd/shared/logger"
-	sqlite3 "github.com/mattn/go-sqlite3"
 )
 
 // Retry wraps a function that interacts with the database, and retries it in
diff --git a/lxd/db/query/slices_test.go b/lxd/db/query/slices_test.go
index 36e31a5b9..2b4a1cce2 100644
--- a/lxd/db/query/slices_test.go
+++ b/lxd/db/query/slices_test.go
@@ -4,7 +4,6 @@ import (
 	"database/sql"
 	"testing"
 
-	_ "github.com/mattn/go-sqlite3"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
diff --git a/lxd/db/query/transaction_test.go b/lxd/db/query/transaction_test.go
index 3aebe186d..8b73b15d1 100644
--- a/lxd/db/query/transaction_test.go
+++ b/lxd/db/query/transaction_test.go
@@ -6,7 +6,6 @@ import (
 	"testing"
 
 	"github.com/lxc/lxd/lxd/db/query"
-	_ "github.com/mattn/go-sqlite3"
 	"github.com/stretchr/testify/assert"
 )
 
diff --git a/lxd/db/schema/schema_test.go b/lxd/db/schema/schema_test.go
index cd4130952..4c225980e 100644
--- a/lxd/db/schema/schema_test.go
+++ b/lxd/db/schema/schema_test.go
@@ -5,7 +5,6 @@ import (
 	"fmt"
 	"testing"
 
-	_ "github.com/mattn/go-sqlite3"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
diff --git a/lxd/db/schema/update.go b/lxd/db/schema/update.go
index 3a9da889d..121cd1c73 100644
--- a/lxd/db/schema/update.go
+++ b/lxd/db/schema/update.go
@@ -7,7 +7,7 @@ import (
 	"path"
 	"runtime"
 
-	_ "github.com/mattn/go-sqlite3" // For opening the in-memory database
+	_ "github.com/CanonicalLtd/go-sqlite3" // For opening the in-memory database
 )
 
 // DotGo writes '<name>.go' source file in the package of the calling function, containing
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index fb39de907..ba789635b 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -5,7 +5,6 @@ import (
 	"fmt"
 	"strings"
 
-	_ "github.com/mattn/go-sqlite3"
 	"github.com/pkg/errors"
 
 	"github.com/lxc/lxd/lxd/db/query"
diff --git a/lxd/db/testing.go b/lxd/db/testing.go
index 0950156a3..beb025384 100644
--- a/lxd/db/testing.go
+++ b/lxd/db/testing.go
@@ -8,8 +8,8 @@ import (
 	"time"
 
 	"github.com/CanonicalLtd/go-grpc-sql"
+	"github.com/CanonicalLtd/go-sqlite3"
 	"github.com/lxc/lxd/lxd/util"
-	"github.com/mattn/go-sqlite3"
 	"github.com/stretchr/testify/require"
 	"google.golang.org/grpc"
 )
diff --git a/lxd/devices.go b/lxd/devices.go
index 0f41b1431..872f0b713 100644
--- a/lxd/devices.go
+++ b/lxd/devices.go
@@ -17,8 +17,6 @@ import (
 	"strings"
 	"syscall"
 
-	_ "github.com/mattn/go-sqlite3"
-
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/state"
 	"github.com/lxc/lxd/lxd/util"
diff --git a/lxd/main_activateifneeded.go b/lxd/main_activateifneeded.go
index acb1bf0e9..3e654f35b 100644
--- a/lxd/main_activateifneeded.go
+++ b/lxd/main_activateifneeded.go
@@ -6,14 +6,13 @@ import (
 	"os"
 	"path/filepath"
 
-	"github.com/CanonicalLtd/go-sqlite3x"
+	"github.com/CanonicalLtd/go-sqlite3"
 	"github.com/lxc/lxd/client"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/node"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/idmap"
 	"github.com/lxc/lxd/shared/logger"
-	"github.com/mattn/go-sqlite3"
 )
 
 func init() {
@@ -114,7 +113,7 @@ func cmdActivateIfNeeded(args *Args) error {
 func sqliteDirectAccess(conn *sqlite3.SQLiteConn) error {
 	// Ensure journal mode is set to WAL, as this is a requirement for
 	// replication.
-	err := sqlite3x.JournalModePragma(conn, sqlite3x.JournalWal)
+	err := sqlite3.JournalModePragma(conn, sqlite3.JournalWal)
 	if err != nil {
 		return err
 	}
@@ -122,11 +121,11 @@ func sqliteDirectAccess(conn *sqlite3.SQLiteConn) error {
 	// Ensure we don't truncate or checkpoint the WAL on exit, as this
 	// would bork replication which must be in full control of the WAL
 	// file.
-	err = sqlite3x.JournalSizeLimitPragma(conn, -1)
+	err = sqlite3.JournalSizeLimitPragma(conn, -1)
 	if err != nil {
 		return err
 	}
-	err = sqlite3x.DatabaseNoCheckpointOnClose(conn)
+	err = sqlite3.DatabaseNoCheckpointOnClose(conn)
 	if err != nil {
 		return err
 	}
diff --git a/lxd/profiles.go b/lxd/profiles.go
index 561633881..c247ed784 100644
--- a/lxd/profiles.go
+++ b/lxd/profiles.go
@@ -9,7 +9,6 @@ import (
 	"strings"
 
 	"github.com/gorilla/mux"
-	_ "github.com/mattn/go-sqlite3"
 
 	"github.com/lxc/lxd/lxd/state"
 	"github.com/lxc/lxd/lxd/util"
diff --git a/lxd/response.go b/lxd/response.go
index c2408b445..6f992301f 100644
--- a/lxd/response.go
+++ b/lxd/response.go
@@ -11,7 +11,7 @@ import (
 	"os"
 	"time"
 
-	"github.com/mattn/go-sqlite3"
+	"github.com/CanonicalLtd/go-sqlite3"
 
 	lxd "github.com/lxc/lxd/client"
 	"github.com/lxc/lxd/lxd/cluster"

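A note on the driver swap above: these hunks replace the stock
mattn/go-sqlite3 bindings with the CanonicalLtd/go-sqlite3 fork, which
carries the replication patches dqlite needs and also absorbs the old
go-sqlite3x helpers (JournalModePragma and friends, which by their
names wrap the standard "PRAGMA journal_mode" / "PRAGMA
journal_size_limit" statements). A rough sketch of building against
the bundled, replication-patched sqlite, mirroring what the test
harness does; the paths are the ones the tests assume, not fixed
requirements:

    # Point cgo and the runtime linker at the patched sqlite submodule
    # before building LXD. /lxc-ci/build/cache/sqlite is the CI cache
    # location used by the test suite; fall back to the in-tree checkout.
    sqlite="$(pwd)/lxd/sqlite"
    if [ -e "/lxc-ci/build/cache/sqlite" ]; then
        sqlite="/lxc-ci/build/cache/sqlite"
    fi
    export CGO_CFLAGS="-I${sqlite}"
    export CGO_LDFLAGS="-L${sqlite}/.libs"
    export LD_LIBRARY_PATH="${sqlite}/.libs"
    go install ./...
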
From 65adcfbf694b00f0211dc44b6538929de0f23151 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 25 Jan 2018 13:32:51 +0000
Subject: [PATCH 222/227] Update sqlite submodule to master HEAD

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/sqlite | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lxd/sqlite b/lxd/sqlite
index 235392610..21302be3b 160000
--- a/lxd/sqlite
+++ b/lxd/sqlite
@@ -1 +1 @@
-Subproject commit 235392610287d85dda11a6eee4d6e34d7cc6ef3f
+Subproject commit 21302be3bf325e73019874f74cbc766c6e54cece

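For reference, a submodule bump like the one above can be reproduced
with plain git, assuming the submodule's remote is the upstream sqlite
fork; the commit hash is the one recorded in the diff:

    # Advance the lxd/sqlite submodule to the new upstream commit and
    # record the new gitlink in the superproject.
    git -C lxd/sqlite fetch origin
    git -C lxd/sqlite checkout 21302be3bf325e73019874f74cbc766c6e54cece
    git add lxd/sqlite
    git commit -s -m "Update sqlite submodule to master HEAD"
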
From 09bd1519e0ba50aa3daa41bdf78a5c95d2bbdbab Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?St=C3=A9phane=20Graber?= <stgraber at ubuntu.com>
Date: Wed, 24 Jan 2018 16:46:52 -0500
Subject: [PATCH 223/227] Re-enable clustering tests
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This temporarily comments out every other suite so that only the
clustering tests run, making it easier to exercise them in CI.

Signed-off-by: Stéphane Graber <stgraber at ubuntu.com>
---
 test/main.sh | 97 +++++++++++++++++++++++++++++-------------------------------
 1 file changed, 47 insertions(+), 50 deletions(-)

diff --git a/test/main.sh b/test/main.sh
index 73fc36110..21b5bd1fb 100755
--- a/test/main.sh
+++ b/test/main.sh
@@ -148,61 +148,58 @@ if [ "$#" -gt 0 ]; then
   exit
 fi
 
-run_test test_check_deps "checking dependencies"
-run_test test_static_analysis "static analysis"
-run_test test_database_update "database schema updates"
-run_test test_remote_url "remote url handling"
-run_test test_remote_admin "remote administration"
-run_test test_remote_usage "remote usage"
-run_test test_basic_usage "basic usage"
-run_test test_security "security features"
-run_test test_image_expiry "image expiry"
-run_test test_image_list_all_aliases "image list all aliases"
-run_test test_image_auto_update "image auto-update"
-run_test test_image_prefer_cached "image prefer cached"
-run_test test_image_import_dir "import image from directory"
-run_test test_concurrent_exec "concurrent exec"
-run_test test_concurrent "concurrent startup"
-run_test test_snapshots "container snapshots"
-run_test test_snap_restore "snapshot restores"
-run_test test_config_profiles "profiles and configuration"
-run_test test_config_edit "container configuration edit"
-run_test test_config_edit_container_snapshot_pool_config "container and snapshot volume configuration edit"
-run_test test_container_metadata "manage container metadata and templates"
-run_test test_server_config "server configuration"
-run_test test_filemanip "file manipulations"
-run_test test_network "network management"
-run_test test_idmap "id mapping"
-run_test test_template "file templating"
-run_test test_pki "PKI mode"
-run_test test_devlxd "/dev/lxd"
-run_test test_fuidshift "fuidshift"
-run_test test_migration "migration"
-run_test test_fdleak "fd leak"
-run_test test_cpu_profiling "CPU profiling"
-run_test test_mem_profiling "memory profiling"
-run_test test_storage "storage"
-run_test test_init_auto "lxd init auto"
-run_test test_init_interactive "lxd init interactive"
-run_test test_init_preseed "lxd init preseed"
-run_test test_storage_profiles "storage profiles"
-run_test test_container_import "container import"
-run_test test_storage_volume_attach "attaching storage volumes"
-run_test test_storage_driver_ceph "ceph storage driver"
-run_test test_resources "resources"
-run_test test_kernel_limits "kernel limits"
-run_test test_macaroon_auth "macaroon authentication"
-run_test test_console "console"
-run_test test_proxy_device "proxy device"
-
-# FIXME: clustering tests are currently not working on CI
-if ! [ -e "/lxc-ci/build/cache/sqlite" ]; then
+#run_test test_check_deps "checking dependencies"
+#run_test test_static_analysis "static analysis"
+#run_test test_database_update "database schema updates"
+#run_test test_remote_url "remote url handling"
+#run_test test_remote_admin "remote administration"
+#run_test test_remote_usage "remote usage"
+#run_test test_basic_usage "basic usage"
+#run_test test_security "security features"
+#run_test test_image_expiry "image expiry"
+#run_test test_image_list_all_aliases "image list all aliases"
+#run_test test_image_auto_update "image auto-update"
+#run_test test_image_prefer_cached "image prefer cached"
+#run_test test_image_import_dir "import image from directory"
+#run_test test_concurrent_exec "concurrent exec"
+#run_test test_concurrent "concurrent startup"
+#run_test test_snapshots "container snapshots"
+#run_test test_snap_restore "snapshot restores"
+#run_test test_config_profiles "profiles and configuration"
+#run_test test_config_edit "container configuration edit"
+#run_test test_config_edit_container_snapshot_pool_config "container and snapshot volume configuration edit"
+#run_test test_container_metadata "manage container metadata and templates"
+#run_test test_server_config "server configuration"
+#run_test test_filemanip "file manipulations"
+#run_test test_network "network management"
+#run_test test_idmap "id mapping"
+#run_test test_template "file templating"
+#run_test test_pki "PKI mode"
+#run_test test_devlxd "/dev/lxd"
+#run_test test_fuidshift "fuidshift"
+#run_test test_migration "migration"
+#run_test test_fdleak "fd leak"
+#run_test test_cpu_profiling "CPU profiling"
+#run_test test_mem_profiling "memory profiling"
+#run_test test_storage "storage"
+#run_test test_init_auto "lxd init auto"
+#run_test test_init_interactive "lxd init interactive"
+#run_test test_init_preseed "lxd init preseed"
+#run_test test_storage_profiles "storage profiles"
+#run_test test_container_import "container import"
+#run_test test_storage_volume_attach "attaching storage volumes"
+#run_test test_storage_driver_ceph "ceph storage driver"
+#run_test test_resources "resources"
+#run_test test_kernel_limits "kernel limits"
+#run_test test_macaroon_auth "macaroon authentication"
+#run_test test_console "console"
+#run_test test_proxy_device "proxy device"
+
     run_test test_clustering_membership "clustering membership"
     run_test test_clustering_containers "clustering containers"
     run_test test_clustering_storage "clustering storage"
     run_test test_clustering_network "clustering network"
     run_test test_clustering_upgrade "clustering upgrade"
-fi
 
 # shellcheck disable=SC2034
 TEST_RESULT=success

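Note that main.sh still honours its argument handling (the "$#" check
left intact at the top of the hunk), so individual suites can be run
by name while the bulk of the list is commented out. A sketch,
assuming the usual invocation from the test directory and that
run_test prepends the test_ prefix to the given name:

    # Run a single suite instead of the full (currently commented-out)
    # list; requires root, like the rest of the test harness.
    cd test
    sudo -E ./main.sh clustering_membership
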
From 014398ccd929b6af930de5d9758b38d323506a0a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?St=C3=A9phane=20Graber?= <stgraber at ubuntu.com>
Date: Wed, 24 Jan 2018 17:34:31 -0500
Subject: [PATCH 224/227] tests: Don't use bridge-utils
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Stéphane Graber <stgraber at ubuntu.com>
---
 test/includes/clustering.sh | 15 +++++----------
 1 file changed, 5 insertions(+), 10 deletions(-)

diff --git a/test/includes/clustering.sh b/test/includes/clustering.sh
index 58e8d8069..3a0dcfaf4 100644
--- a/test/includes/clustering.sh
+++ b/test/includes/clustering.sh
@@ -5,9 +5,8 @@ setup_clustering_bridge() {
 
   echo "==> Setup clustering bridge ${name}"
 
-  brctl addbr "${name}"
+  ip link add "${name}" up type bridge
   ip addr add 10.1.1.1/16 dev "${name}"
-  ip link set dev "${name}" up
 
   iptables -t nat -A POSTROUTING -s 10.1.0.0/16 -d 0.0.0.0/0 -j MASQUERADE
   echo 1 > /proc/sys/net/ipv4/ip_forward
@@ -16,13 +15,11 @@ setup_clustering_bridge() {
 teardown_clustering_bridge() {
   name="br$$"
 
-  if brctl show | grep -q "${name}" ; then
+  if [ -e "/sys/class/net/${name}" ]; then
       echo "==> Teardown clustering bridge ${name}"
       echo 0 > /proc/sys/net/ipv4/ip_forward
       iptables -t nat -D POSTROUTING -s 10.1.0.0/16 -d 0.0.0.0/0 -j MASQUERADE
-      ip link set dev "${name}" down
-      ip addr del 10.1.1.1/16 dev "${name}"
-      brctl delbr "${name}"
+      ip link del dev "${name}"
   fi
 }
 
@@ -115,7 +112,7 @@ EOF
   ip link set "${veth2}" netns "${ns}"
 
   nsbridge="br$$"
-  brctl addif "${nsbridge}" "${veth1}"
+  ip link set dev "${veth1}" master "${nsbridge}" up
 
   ip link set "${veth1}" up
   (
@@ -139,9 +136,7 @@ teardown_clustering_netns() {
       veth2="v${ns}2"
       nsenter --all --target="${pid}" ip link set eth0 down
       nsenter --all --target="${pid}" ip link set lo down
-      ip link set "${veth1}" down
-      brctl delif "${nsbridge}" "${veth1}"
-      ip link delete "${veth1}" type veth
+      ip link del "${veth1}"
       umount "/run/netns/${ns}"
       rm "/run/netns/${ns}"
       lxc-stop -n "${ns}"

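The bridge-utils calls removed above have direct iproute2 equivalents,
which is all this patch is doing. A self-contained sketch of the same
operations with throwaway names (run as root):

    # Create a bridge and a veth pair, enslave one end, verify the
    # bridge exists via sysfs (the check the patch now uses), then
    # tear everything down. Deleting a veth end removes its peer too.
    ip link add br-demo up type bridge
    ip link add veth-demo0 type veth peer name veth-demo1
    ip link set dev veth-demo0 master br-demo up
    [ -e /sys/class/net/br-demo ] && echo "==> bridge br-demo is up"
    ip link del dev veth-demo0
    ip link del dev br-demo
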
From da888f0753b3bc9fcdd0a805e5837b7afbb4ce9b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?St=C3=A9phane=20Graber?= <stgraber at ubuntu.com>
Date: Thu, 25 Jan 2018 17:06:26 -0500
Subject: [PATCH 225/227] tests: Fix mixed tabs and spaces
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Stéphane Graber <stgraber at ubuntu.com>
---
 test/includes/lxd.sh           | 34 +++++++++++++++++-----------------
 test/suites/basic.sh           |  2 +-
 test/suites/static_analysis.sh |  6 +++---
 3 files changed, 21 insertions(+), 21 deletions(-)

diff --git a/test/includes/lxd.sh b/test/includes/lxd.sh
index ab34b66f1..a9a60ac55 100644
--- a/test/includes/lxd.sh
+++ b/test/includes/lxd.sh
@@ -17,7 +17,7 @@ spawn_lxd() {
     # Link to local sqlite with replication patch for dqlite
     sqlite="$(pwd)/../lxd/sqlite"
     if [ -e "/lxc-ci/build/cache/sqlite" ]; then
-	sqlite="/lxc-ci/build/cache/sqlite"
+        sqlite="/lxc-ci/build/cache/sqlite"
     fi
 
     # shellcheck disable=SC2153
@@ -44,10 +44,10 @@ spawn_lxd() {
     # shellcheck disable=SC2086
 
     if [ "${LXD_NETNS}" = "" ]; then
-	LD_LIBRARY_PATH="${sqlite}/.libs" LXD_DIR="${lxddir}" lxd --logfile "${lxddir}/lxd.log" "${DEBUG-}" "$@" 2>&1 &
+        LD_LIBRARY_PATH="${sqlite}/.libs" LXD_DIR="${lxddir}" lxd --logfile "${lxddir}/lxd.log" "${DEBUG-}" "$@" 2>&1 &
     else
-	pid="$(lxc-info -n "${LXD_NETNS}" -p | cut -f 2 -d : | tr -d " ")"
-	LD_LIBRARY_PATH="${sqlite}/.libs" LXD_DIR="${lxddir}" nsenter --all --target="${pid}" lxd --logfile "${lxddir}/lxd.log" "${DEBUG-}" "$@" 2>&1 &
+        pid="$(lxc-info -n "${LXD_NETNS}" -p | cut -f 2 -d : | tr -d " ")"
+        LD_LIBRARY_PATH="${sqlite}/.libs" LXD_DIR="${lxddir}" nsenter --all --target="${pid}" lxd --logfile "${lxddir}/lxd.log" "${DEBUG-}" "$@" 2>&1 &
     fi
     LXD_PID=$!
     echo "${LXD_PID}" > "${lxddir}/lxd.pid"
@@ -59,15 +59,15 @@ spawn_lxd() {
     LXD_DIR="${lxddir}" lxd waitready --timeout=300
 
     if [ "${LXD_NETNS}" = "" ]; then
-	echo "==> Binding to network"
-	# shellcheck disable=SC2034
-	for i in $(seq 10); do
+        echo "==> Binding to network"
+        # shellcheck disable=SC2034
+        for i in $(seq 10); do
             addr="127.0.0.1:$(local_tcp_port)"
             LXD_DIR="${lxddir}" lxc config set core.https_address "${addr}" || continue
             echo "${addr}" > "${lxddir}/lxd.addr"
             echo "==> Bound to ${addr}"
             break
-	done
+        done
     fi
 
     echo "==> Setting trust password"
@@ -77,8 +77,8 @@ spawn_lxd() {
     fi
 
     if [ "${LXD_NETNS}" = "" ]; then
-	echo "==> Setting up networking"
-	LXD_DIR="${lxddir}" lxc profile device add default eth0 nic nictype=p2p name=eth0
+        echo "==> Setting up networking"
+        LXD_DIR="${lxddir}" lxc profile device add default eth0 nic nictype=p2p name=eth0
     fi
 
     if [ "${storage}" = true ]; then
@@ -104,23 +104,23 @@ respawn_lxd() {
     # Link to local sqlite with replication patch for dqlite
     sqlite="$(pwd)/../lxd/sqlite"
     if [ -e "/lxc-ci/build/cache/sqlite" ]; then
-	sqlite="/lxc-ci/build/cache/sqlite"
+        sqlite="/lxc-ci/build/cache/sqlite"
     fi
 
     echo "==> Spawning lxd in ${lxddir}"
     # shellcheck disable=SC2086
     if [ "${LXD_NETNS}" = "" ]; then
-	LD_LIBRARY_PATH="${sqlite}/.libs" LXD_DIR="${lxddir}" lxd --logfile "${lxddir}/lxd.log" "${DEBUG-}" "$@" 2>&1 &
+        LD_LIBRARY_PATH="${sqlite}/.libs" LXD_DIR="${lxddir}" lxd --logfile "${lxddir}/lxd.log" "${DEBUG-}" "$@" 2>&1 &
     else
-	pid="$(lxc-info -n "${LXD_NETNS}" -p | cut -f 2 -d : | tr -d " ")"
-	LD_LIBRARY_PATH="${sqlite}/.libs" LXD_DIR="${lxddir}" nsenter --all --target="${pid}" lxd --logfile "${lxddir}/lxd.log" "${DEBUG-}" "$@" 2>&1 &    fi
+        pid="$(lxc-info -n "${LXD_NETNS}" -p | cut -f 2 -d : | tr -d " ")"
+        LD_LIBRARY_PATH="${sqlite}/.libs" LXD_DIR="${lxddir}" nsenter --all --target="${pid}" lxd --logfile "${lxddir}/lxd.log" "${DEBUG-}" "$@" 2>&1 &    fi
     LXD_PID=$!
     echo "${LXD_PID}" > "${lxddir}/lxd.pid"
     echo "==> Spawned LXD (PID is ${LXD_PID})"
 
     if [ "${wait}" = true ]; then
-	echo "==> Confirming lxd is responsive"
-	LXD_DIR="${lxddir}" lxd waitready --timeout=300
+        echo "==> Confirming lxd is responsive"
+        LXD_DIR="${lxddir}" lxd waitready --timeout=300
     fi
 }
 
@@ -206,7 +206,7 @@ kill_lxd() {
         check_empty "${daemon_dir}/snapshots/"
 
         echo "==> Checking for leftover cluster DB entries"
-	# FIXME: we should not use the command line sqlite client, since it's
+        # FIXME: we should not use the command line sqlite client, since it's
         #        not compatible with dqlite
         check_empty_table "${daemon_dir}/raft/db.bin" "containers"
         check_empty_table "${daemon_dir}/raft/db.bin" "containers_config"
diff --git a/test/suites/basic.sh b/test/suites/basic.sh
index 523b0a09f..1a3839056 100644
--- a/test/suites/basic.sh
+++ b/test/suites/basic.sh
@@ -252,7 +252,7 @@ test_basic_usage() {
     # Link to local sqlite with replication patch for dqlite
     sqlite="$(pwd)/../lxd/sqlite"
     if [ -e "/lxc-ci/build/cache/sqlite" ]; then
-	sqlite="/lxc-ci/build/cache/sqlite"
+        sqlite="/lxc-ci/build/cache/sqlite"
     fi
 
     set -e
diff --git a/test/suites/static_analysis.sh b/test/suites/static_analysis.sh
index 98e3b65d3..f7eae7c50 100644
--- a/test/suites/static_analysis.sh
+++ b/test/suites/static_analysis.sh
@@ -24,10 +24,10 @@ test_static_analysis() {
     # Go static analysis
     sqlite="$(pwd)/lxd/sqlite"
     if [ -e "/lxc-ci/build/cache/sqlite" ]; then
-	sqlite="/lxc-ci/build/cache/sqlite"
-	ls "/lxc-ci/build/cache/sqlite"
+        sqlite="/lxc-ci/build/cache/sqlite"
+        ls "/lxc-ci/build/cache/sqlite"
     fi
-    
+
     CGO_CFLAGS="-I${sqlite}"
     CGO_LDFLAGS="-L${sqlite}/.libs"
     LD_LIBRARY_PATH="${sqlite}/.libs"

From e442367f751524eb03099760ed5a00e7d21c27b3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?St=C3=A9phane=20Graber?= <stgraber at ubuntu.com>
Date: Thu, 25 Jan 2018 17:07:36 -0500
Subject: [PATCH 226/227] tests: Drop trailing whitespace
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Stéphane Graber <stgraber at ubuntu.com>
---
 test/suites/clustering.sh          | 2 +-
 test/suites/init_preseed.sh        | 2 +-
 test/suites/kernel_limits.sh       | 2 +-
 test/suites/proxy.sh               | 1 -
 test/suites/storage.sh             | 2 +-
 test/suites/storage_driver_ceph.sh | 2 +-
 6 files changed, 5 insertions(+), 6 deletions(-)

diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 003d4ccbf..ec754914d 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -33,7 +33,7 @@ test_clustering_membership() {
   # networks and pools.
   LXD_DIR="${LXD_TWO_DIR}" lxc storage create pool1 dir --target node1
   LXD_DIR="${LXD_ONE_DIR}" lxc network create net1 --target node2
-  
+
   # Spawn a third node, using the non-leader node2 as join target.
   setup_clustering_netns 3
   LXD_THREE_DIR=$(mktemp -d -p "${TEST_DIR}" XXX)
diff --git a/test/suites/init_preseed.sh b/test/suites/init_preseed.sh
index e078304e0..4245ac596 100644
--- a/test/suites/init_preseed.sh
+++ b/test/suites/init_preseed.sh
@@ -59,7 +59,7 @@ profiles:
       parent: lxdt$$
       type: nic
 EOF
-  
+
     lxc info | grep -q 'core.https_address: 127.0.0.1:9999'
     lxc info | grep -q 'images.auto_update_interval: "15"'
     lxc network list | grep -q "lxdt$$"
diff --git a/test/suites/kernel_limits.sh b/test/suites/kernel_limits.sh
index 0515be572..19490d533 100644
--- a/test/suites/kernel_limits.sh
+++ b/test/suites/kernel_limits.sh
@@ -20,7 +20,7 @@ test_kernel_limits() {
   soft=$(grep ^"Max open files" /proc/"${pid}"/limits | awk '{print $4}')
   hard=$(grep ^"Max open files" /proc/"${pid}"/limits | awk '{print $5}')
 
-  lxc delete --force limits 
+  lxc delete --force limits
 
   [ "${soft}" = "3000" ] && [ "${hard}" = "3000" ]
 }
diff --git a/test/suites/proxy.sh b/test/suites/proxy.sh
index 324c407eb..9017c253e 100755
--- a/test/suites/proxy.sh
+++ b/test/suites/proxy.sh
@@ -1,5 +1,4 @@
 test_proxy_device() {
-  
   MESSAGE="Proxy device test string"
   HOST_TCP_PORT=$(local_tcp_port)
 
diff --git a/test/suites/storage.sh b/test/suites/storage.sh
index bb089c2bb..88ba169b3 100644
--- a/test/suites/storage.sh
+++ b/test/suites/storage.sh
@@ -2,7 +2,7 @@ test_storage() {
   ensure_import_testimage
 
   # shellcheck disable=2039
-  local LXD_STORAGE_DIR lxd_backend  
+  local LXD_STORAGE_DIR lxd_backend
 
   lxd_backend=$(storage_backend "$LXD_DIR")
   LXD_STORAGE_DIR=$(mktemp -d -p "${TEST_DIR}" XXXXXXXXX)
diff --git a/test/suites/storage_driver_ceph.sh b/test/suites/storage_driver_ceph.sh
index 2c28a7e88..b4211e693 100644
--- a/test/suites/storage_driver_ceph.sh
+++ b/test/suites/storage_driver_ceph.sh
@@ -2,7 +2,7 @@ test_storage_driver_ceph() {
   ensure_import_testimage
 
   # shellcheck disable=2039
-  local LXD_STORAGE_DIR lxd_backend  
+  local LXD_STORAGE_DIR lxd_backend
 
   lxd_backend=$(storage_backend "$LXD_DIR")
   LXD_STORAGE_DIR=$(mktemp -d -p "${TEST_DIR}" XXXXXXXXX)

From 52959189cc29545ab5a27e5faf6d4b50b2423dbb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?St=C3=A9phane=20Graber?= <stgraber at ubuntu.com>
Date: Wed, 24 Jan 2018 22:40:08 -0500
Subject: [PATCH 227/227] tests: Drop use of lxc-execute
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Stéphane Graber <stgraber at ubuntu.com>
---
 test/includes/clustering.sh | 118 +++++++++++---------------------------------
 test/includes/lxd.sh        |  16 +++---
 test/suites/clustering.sh   |   6 ++-
 3 files changed, 42 insertions(+), 98 deletions(-)

diff --git a/test/includes/clustering.sh b/test/includes/clustering.sh
index 3a0dcfaf4..cd09dc69f 100644
--- a/test/includes/clustering.sh
+++ b/test/includes/clustering.sh
@@ -29,117 +29,59 @@ setup_clustering_netns() {
 
   prefix="lxd$$"
   ns="${prefix}${id}"
-  rcfile="${TEST_DIR}/${ns}.conf"
 
   echo "==> Setup clustering netns ${ns}"
 
-  cat > "${rcfile}" <<EOF
-lxc.console.path=none
-lxc.mount.entry = cgroup                 sys/fs/cgroup                  tmpfs   rw,nosuid,nodev,noexec,mode=755,create=dir                                   0 0
-lxc.mount.entry = cgroup2                sys/fs/cgroup/unified          cgroup2 rw,nosuid,nodev,noexec,relatime,create=dir                                   0 0
-lxc.mount.entry = name=systemd           sys/fs/cgroup/systemd          cgroup  rw,nosuid,nodev,noexec,relatime,xattr,clone_children,name=systemd,create=dir 0 0
-lxc.mount.entry = net_cls,net_prio       sys/fs/cgroup/net_cls,net_prio cgroup  rw,nosuid,nodev,noexec,relatime,net_cls,net_prio,clone_children,create=dir   0 0
-lxc.mount.entry = cpuset                 sys/fs/cgroup/cpuset           cgroup  rw,nosuid,nodev,noexec,relatime,cpuset,clone_children,create=dir             0 0
-lxc.mount.entry = hugetlb                sys/fs/cgroup/hugetlb          cgroup  rw,nosuid,nodev,noexec,relatime,hugetlb,clone_children,create=dir            0 0
-lxc.mount.entry = blkio                  sys/fs/cgroup/blkio            cgroup  rw,nosuid,nodev,noexec,relatime,blkio,clone_children,create=dir              0 0
-lxc.mount.entry = cpu,cpuacct            sys/fs/cgroup/cpu,cpuacct      cgroup  rw,nosuid,nodev,noexec,relatime,cpu,cpuacct,clone_children,create=dir        0 0
-lxc.mount.entry = pids                   sys/fs/cgroup/pids             cgroup  rw,nosuid,nodev,noexec,relatime,pids,clone_children,create=dir               0 0
-lxc.mount.entry = rdma                   sys/fs/cgroup/rdma             cgroup  rw,nosuid,nodev,noexec,relatime,rdma,clone_children,create=dir               0 0
-lxc.mount.entry = perf_event             sys/fs/cgroup/perf_event       cgroup  rw,nosuid,nodev,noexec,relatime,perf_event,clone_children,create=dir         0 0
-lxc.mount.entry = memory                 sys/fs/cgroup/memory           cgroup  rw,nosuid,nodev,noexec,relatime,memory,clone_children,create=dir             0 0
-lxc.mount.entry = freezer                sys/fs/cgroup/freezer          cgroup  rw,nosuid,nodev,noexec,relatime,freezer,clone_children,create=dir            0 0
-lxc.mount.entry = /sys/fs/cgroup/devices sys/fs/cgroup/devices          none    bind,create=dir 0 0
-
-# CGroup whitelist
-lxc.cgroup.devices.deny = a
-## Allow any mknod (but not reading/writing the node)
-lxc.cgroup.devices.allow = c *:* m
-lxc.cgroup.devices.allow = b *:* m
-## Allow specific devices
-### /dev/null
-lxc.cgroup.devices.allow = c 1:3 rwm
-### /dev/zero
-lxc.cgroup.devices.allow = c 1:5 rwm
-### /dev/full
-lxc.cgroup.devices.allow = c 1:7 rwm
-### /dev/tty
-lxc.cgroup.devices.allow = c 5:0 rwm
-### /dev/console
-lxc.cgroup.devices.allow = c 5:1 rwm
-### /dev/ptmx
-lxc.cgroup.devices.allow = c 5:2 rwm
-### /dev/random
-lxc.cgroup.devices.allow = c 1:8 rwm
-### /dev/urandom
-lxc.cgroup.devices.allow = c 1:9 rwm
-### /dev/pts/*
-lxc.cgroup.devices.allow = c 136:* rwm
-### fuse
-lxc.cgroup.devices.allow = c 10:229 rwm
-### loop
-lxc.cgroup.devices.allow = b 7:* rwm
-
-lxc.apparmor.profile = unconfined
-
-lxc.pty.max = 1024
-lxc.tty.max = 10
-lxc.environment=TERM=xterm
-
-lxc.hook.version = 1
-lxc.hook.autodev = mknod /dev/loop-control c 10, 237
-lxc.hook.autodev = mknod /dev/loop0 c 7 0
-lxc.hook.autodev = mknod /dev/loop1 c 7 1
-lxc.hook.autodev = mknod /dev/loop2 c 7 2
-lxc.hook.autodev = mknod /dev/loop3 c 7 3
-lxc.hook.autodev = mknod /dev/loop4 c 7 4
-lxc.hook.autodev = mknod /dev/loop5 c 7 5
-lxc.hook.autodev = mknod /dev/loop6 c 7 6
-lxc.hook.autodev = mknod /dev/loop7 c 7 7
+  (
+    cat << EOF
+set -e
+mkdir -p "${TEST_DIR}/ns/${ns}"
+touch "${TEST_DIR}/ns/${ns}/net"
+mount -o bind /proc/self/ns/net "${TEST_DIR}/ns/${ns}/net"
+sleep 300&
+echo \$! > "${TEST_DIR}/ns/${ns}/PID"
 EOF
-  lxc-execute -n "${ns}" --rcfile "${rcfile}" -- sh -c 'while true; do sleep 1; done' &
-  sleep 1
-
-  mkdir -p /run/netns
-  touch "/run/netns/${ns}"
-
-  pid="$(lxc-info -n "${ns}" -p | cut -f 2 -d : | tr -d " ")"
-  mount --bind "/proc/${pid}/ns/net" "/run/netns/${ns}"
+  ) | unshare -m -n /bin/sh
 
   veth1="v${ns}1"
   veth2="v${ns}2"
+  nspid=$(cat "${TEST_DIR}/ns/${ns}/PID")
 
   ip link add "${veth1}" type veth peer name "${veth2}"
-  ip link set "${veth2}" netns "${ns}"
+  ip link set "${veth2}" netns "${nspid}"
 
   nsbridge="br$$"
   ip link set dev "${veth1}" master "${nsbridge}" up
-
-  ip link set "${veth1}" up
   (
     cat <<EOF
-    ip link set dev lo up
-    ip link set dev "${veth2}" name eth0
-    ip link set eth0 up
-    ip addr add "10.1.1.10${id}/16" dev eth0
-    ip route add default via 10.1.1.1
+set -e
+
+ip link set dev lo up
+ip link set dev "${veth2}" name eth0
+ip link set eth0 up
+ip addr add "10.1.1.10${id}/16" dev eth0
+ip route add default via 10.1.1.1
 EOF
-  ) | nsenter --all --target="${pid}" sh
+  ) | nsenter -n -t "${nspid}" /bin/sh
 }
 
 teardown_clustering_netns() {
   prefix="lxd$$"
   nsbridge="br$$"
-  for ns in $(lxc-ls | grep "${prefix}") ; do
+
+  [ ! -d "${TEST_DIR}/ns/" ] && return
+
+  for ns in $(ls -1 "${TEST_DIR}/ns/"); do
       echo "==> Teardown clustering netns ${ns}"
-      pid="$(lxc-info -n "${ns}" -p | cut -f 2 -d : | tr -d " ")"
+
+      pid="$(cat "${TEST_DIR}/ns/${ns}/PID")"
+      kill -9 "${pid}"
+
       veth1="v${ns}1"
-      veth2="v${ns}2"
-      nsenter --all --target="${pid}" ip link set eth0 down
-      nsenter --all --target="${pid}" ip link set lo down
       ip link del "${veth1}"
-      umount "/run/netns/${ns}"
-      rm "/run/netns/${ns}"
-      lxc-stop -n "${ns}"
+
+      umount -l "${TEST_DIR}/ns/${ns}/net" >/dev/null 2>&1 || true
+      rm -Rf "${TEST_DIR}/ns/${ns}"
   done
 }
 
diff --git a/test/includes/lxd.sh b/test/includes/lxd.sh
index a9a60ac55..90003e290 100644
--- a/test/includes/lxd.sh
+++ b/test/includes/lxd.sh
@@ -46,8 +46,8 @@ spawn_lxd() {
     if [ "${LXD_NETNS}" = "" ]; then
         LD_LIBRARY_PATH="${sqlite}/.libs" LXD_DIR="${lxddir}" lxd --logfile "${lxddir}/lxd.log" "${DEBUG-}" "$@" 2>&1 &
     else
-        pid="$(lxc-info -n "${LXD_NETNS}" -p | cut -f 2 -d : | tr -d " ")"
-        LD_LIBRARY_PATH="${sqlite}/.libs" LXD_DIR="${lxddir}" nsenter --all --target="${pid}" lxd --logfile "${lxddir}/lxd.log" "${DEBUG-}" "$@" 2>&1 &
+        pid="$(cat "${TEST_DIR}/ns/${LXD_NETNS}/PID")"
+        LD_LIBRARY_PATH="${sqlite}/.libs" LXD_DIR="${lxddir}" nsenter -n -t "${pid}" lxd --logfile "${lxddir}/lxd.log" "${DEBUG-}" "$@" 2>&1 &
     fi
     LXD_PID=$!
     echo "${LXD_PID}" > "${lxddir}/lxd.pid"
@@ -112,8 +112,8 @@ respawn_lxd() {
     if [ "${LXD_NETNS}" = "" ]; then
         LD_LIBRARY_PATH="${sqlite}/.libs" LXD_DIR="${lxddir}" lxd --logfile "${lxddir}/lxd.log" "${DEBUG-}" "$@" 2>&1 &
     else
-        pid="$(lxc-info -n "${LXD_NETNS}" -p | cut -f 2 -d : | tr -d " ")"
-        LD_LIBRARY_PATH="${sqlite}/.libs" LXD_DIR="${lxddir}" nsenter --all --target="${pid}" lxd --logfile "${lxddir}/lxd.log" "${DEBUG-}" "$@" 2>&1 &    fi
+        pid="$(cat "${TEST_DIR}/ns/${LXD_NETNS}/PID")"
+        LD_LIBRARY_PATH="${sqlite}/.libs" LXD_DIR="${lxddir}" nsenter -n -t "${pid}" lxd --logfile "${lxddir}/lxd.log" "${DEBUG-}" "$@" 2>&1 &    fi
     LXD_PID=$!
     echo "${LXD_PID}" > "${lxddir}/lxd.pid"
     echo "==> Spawned LXD (PID is ${LXD_PID})"
@@ -310,12 +310,12 @@ cleanup_lxds() {
         ip link del lxdt$$
     fi
 
+    # Cleanup clustering networking, if any
+    teardown_clustering_netns
+    teardown_clustering_bridge
+
     # Wipe the test environment
     wipe "$test_dir"
 
     umount_loops "$test_dir"
-
-    # Cleanup clustering networking, if any
-    teardown_clustering_netns
-    teardown_clustering_bridge
 }
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index ec754914d..316bf4a81 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -25,8 +25,10 @@ test_clustering_membership() {
   LXD_DIR="${LXD_TWO_DIR}" lxc info | grep -q 'images.auto_update_interval: "10"'
 
   # The preseeded network bridge exists on all nodes.
-  ip netns exec "${ns1}" ip link show "${bridge}" > /dev/null
-  ip netns exec "${ns2}" ip link show "${bridge}" > /dev/null
+  ns1_pid="$(cat "${TEST_DIR}/ns/${ns1}/PID")"
+  ns2_pid="$(cat "${TEST_DIR}/ns/${ns2}/PID")"
+  nsenter -n -t "${ns1_pid}" -- ip link show "${bridge}" > /dev/null
+  nsenter -n -t "${ns2_pid}" -- ip link show "${bridge}" > /dev/null
 
   # Create a pending network and pool, to show that they are not
   # considered when checking if the joining node has all the required

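The lxc-execute replacement above reduces to a small unshare/nsenter
pattern: spawn a long-lived process in a fresh network namespace,
record its PID, and have later commands join that namespace with
nsenter -n -t. A standalone sketch of the same idea with hypothetical
paths (run as root):

    # A backgrounded sleep keeps the new netns alive after the shell
    # that created it exits; its PID is what nsenter targets later.
    mkdir -p /tmp/ns-demo
    unshare -n /bin/sh -c 'sleep 300 & echo $! > /tmp/ns-demo/PID'
    pid="$(cat /tmp/ns-demo/PID)"
    nsenter -n -t "${pid}" ip link set lo up
    nsenter -n -t "${pid}" ip addr show lo
    kill "${pid}"
    rm -rf /tmp/ns-demo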
