[lxc-devel] [lxd/master] [WIP] storage api

brauner on Github lxc-bot at linuxcontainers.org
Thu Feb 2 10:45:43 UTC 2017


From cc382e3c68b9edec2d6ad4f79a4909f535840145 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Thu, 5 Jan 2017 12:50:23 +0100
Subject: [PATCH 01/63] lxd/db: implement storage api tables and functions

Tables:
- storage_pools
  - The pool name has to be unique.
- storage_pools_config
  - Each config is linked uniquely to a storage pool.
- storage_volumes
  - Storage volume names are unique with respect to a storage pool and volume
    type.
- storage_volumes_config
  - Each config is linked uniquely to a storage volume.
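
For illustration, here is a minimal standalone sketch (not part of this
patch) that creates the two tables relevant for volume lookup in an
in-memory SQLite database and resolves a volume to its pool via the
foreign key. The volume type constant is an illustrative value only:

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/mattn/go-sqlite3"
)

const storagePoolVolumeTypeContainer = 0 // illustrative value

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Create the two tables the lookup needs and seed one pool/volume.
	_, err = db.Exec(`
CREATE TABLE storage_pools (
    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
    pool_name VARCHAR(255) NOT NULL UNIQUE,
    driver VARCHAR(255) NOT NULL
);
CREATE TABLE storage_volumes (
    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
    volume_name VARCHAR(255) NOT NULL,
    storage_pool_id INTEGER NOT NULL,
    storage_volume_type INTEGER NOT NULL,
    UNIQUE (storage_pool_id, volume_name, storage_volume_type),
    FOREIGN KEY (storage_pool_id) REFERENCES storage_pools (id) ON DELETE CASCADE
);
INSERT INTO storage_pools (pool_name, driver) VALUES ('default', 'dir');
INSERT INTO storage_volumes (volume_name, storage_pool_id, storage_volume_type)
    VALUES ('c1', 1, 0);`)
	if err != nil {
		log.Fatal(err)
	}

	// Resolve which pool the volume "c1" lives on via the foreign key.
	var poolName string
	err = db.QueryRow(`
SELECT pool_name FROM storage_pools
JOIN storage_volumes ON storage_pools.id = storage_volumes.storage_pool_id
WHERE storage_volumes.volume_name = ? AND storage_volumes.storage_volume_type = ?`,
		"c1", storagePoolVolumeTypeContainer).Scan(&poolName)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println("c1 lives on pool:", poolName) // prints: default
}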

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/db.go                 |  30 +++
 lxd/db_containers.go      |  27 ++-
 lxd/db_images.go          |  43 ++++
 lxd/db_storage_pools.go   | 511 ++++++++++++++++++++++++++++++++++++++++++++++
 lxd/db_storage_volumes.go |  99 +++++++++
 lxd/db_update.go          |  37 ++++
 6 files changed, 746 insertions(+), 1 deletion(-)
 create mode 100644 lxd/db_storage_pools.go
 create mode 100644 lxd/db_storage_volumes.go

diff --git a/lxd/db.go b/lxd/db.go
index 969db52..a920bde 100644
--- a/lxd/db.go
+++ b/lxd/db.go
@@ -179,6 +179,36 @@ CREATE TABLE IF NOT EXISTS schema (
     version INTEGER NOT NULL,
     updated_at DATETIME NOT NULL,
     UNIQUE (version)
+);
+CREATE TABLE IF NOT EXISTS storage_pools (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    pool_name VARCHAR(255) NOT NULL,
+    driver VARCHAR(255) NOT NULL,
+    UNIQUE (pool_name)
+);
+CREATE TABLE IF NOT EXISTS storage_pools_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    storage_pool_id INTEGER NOT NULL,
+    key VARCHAR(255) NOT NULL,
+    value TEXT,
+    UNIQUE (storage_pool_id, key),
+    FOREIGN KEY (storage_pool_id) REFERENCES storage_pools (id) ON DELETE CASCADE
+);
+CREATE TABLE IF NOT EXISTS storage_volumes (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    volume_name VARCHAR(255) NOT NULL,
+    storage_pool_id INTEGER NOT NULL,
+    storage_volume_type INTEGER NOT NULL,
+    UNIQUE (storage_pool_id, volume_name, storage_volume_type),
+    FOREIGN KEY (storage_pool_id) REFERENCES storage_pools (id) ON DELETE CASCADE
+);
+CREATE TABLE IF NOT EXISTS storage_volumes_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    storage_volume_id INTEGER NOT NULL,
+    key VARCHAR(255) NOT NULL,
+    value TEXT,
+    UNIQUE (storage_volume_id, key),
+    FOREIGN KEY (storage_volume_id) REFERENCES storage_volumes (id) ON DELETE CASCADE
 );`
 
 // Create the initial (current) schema for a given SQLite DB connection.
diff --git a/lxd/db_containers.go b/lxd/db_containers.go
index 74aacbf..0537702 100644
--- a/lxd/db_containers.go
+++ b/lxd/db_containers.go
@@ -72,7 +72,8 @@ func dbContainerGet(db *sql.DB, name string) (containerArgs, error) {
 	statefulInt := -1
 	q := "SELECT id, architecture, type, ephemeral, stateful, creation_date, last_use_date FROM containers WHERE name=?"
 	arg1 := []interface{}{name}
-	arg2 := []interface{}{&args.Id, &args.Architecture, &args.Ctype, &ephemInt, &statefulInt, &args.CreationDate, &used}
+	arg2 := []interface{}{&args.Id, &args.Architecture, &args.Ctype,
+		&ephemInt, &statefulInt, &args.CreationDate, &used}
 	err := dbQueryRowScan(db, q, arg1, arg2)
 	if err != nil {
 		return args, err
@@ -452,3 +453,27 @@ func dbContainerGetSnapshots(db *sql.DB, name string) ([]string, error) {
 
 	return result, nil
 }
+
+// Get the storage pool of a given container.
+func dbContainerPool(db *sql.DB, containerName string) (string, error) {
+	// Get container storage volume. Since container names are globally
+	// unique, and their storage volumes carry the same name, their storage
+	// volumes are unique too.
+	poolName := ""
+	query := `SELECT pool_name FROM storage_pools
+JOIN storage_volumes ON storage_pools.id=storage_volumes.storage_pool_id
+WHERE storage_volumes.volume_name=? AND storage_volumes.storage_volume_type=?`
+	inargs := []interface{}{containerName, storagePoolVolumeTypeContainer}
+	outargs := []interface{}{&poolName}
+
+	err := dbQueryRowScan(db, query, inargs, outargs)
+	if err != nil {
+		if err == sql.ErrNoRows {
+			return "", NoSuchObjectError
+		}
+
+		return "", err
+	}
+
+	return poolName, nil
+}
diff --git a/lxd/db_images.go b/lxd/db_images.go
index c8bc4fa..a6b0ba5 100644
--- a/lxd/db_images.go
+++ b/lxd/db_images.go
@@ -457,3 +457,46 @@ func dbImageInsert(db *sql.DB, fp string, fname string, sz int64, public bool, a
 
 	return nil
 }
+
+// Get the IDs of all storage pools on which a given image exists.
+func dbImageGetPools(db *sql.DB, imageFingerprint string) ([]int64, error) {
+	poolID := int64(-1)
+	query := "SELECT storage_pool_id FROM storage_volumes WHERE volume_name=? AND storage_volume_type=?"
+	inargs := []interface{}{imageFingerprint, storagePoolVolumeTypeImage}
+	outargs := []interface{}{poolID}
+
+	result, err := dbQueryScan(db, query, inargs, outargs)
+	if err != nil {
+		return []int64{}, err
+	}
+
+	poolIDs := []int64{}
+	for _, r := range result {
+		poolIDs = append(poolIDs, r[0].(int64))
+	}
+
+	return poolIDs, nil
+}
+
+// Get the names of the storage pools with the given IDs.
+func dbImageGetPoolNamesFromIDs(db *sql.DB, poolIDs []int64) ([]string, error) {
+	var poolName string
+	query := "SELECT pool_name FROM storage_pools WHERE id=?"
+
+	poolNames := []string{}
+	for _, poolID := range poolIDs {
+		inargs := []interface{}{poolID}
+		outargs := []interface{}{poolName}
+
+		result, err := dbQueryScan(db, query, inargs, outargs)
+		if err != nil {
+			return []string{}, err
+		}
+
+		for _, r := range result {
+			poolNames = append(poolNames, r[0].(string))
+		}
+	}
+
+	return poolNames, nil
+}
diff --git a/lxd/db_storage_pools.go b/lxd/db_storage_pools.go
new file mode 100644
index 0000000..69daf3e
--- /dev/null
+++ b/lxd/db_storage_pools.go
@@ -0,0 +1,511 @@
+package main
+
+import (
+	"database/sql"
+
+	_ "github.com/mattn/go-sqlite3"
+
+	"github.com/lxc/lxd/shared/api"
+)
+
+// Get all storage pools.
+func dbStoragePools(db *sql.DB) ([]string, error) {
+	var name string
+	query := "SELECT pool_name FROM storage_pools"
+	inargs := []interface{}{}
+	outargs := []interface{}{name}
+
+	result, err := dbQueryScan(db, query, inargs, outargs)
+	if err != nil {
+		return []string{}, err
+	}
+
+	if len(result) == 0 {
+		return []string{}, NoSuchObjectError
+	}
+
+	pools := []string{}
+	for _, r := range result {
+		pools = append(pools, r[0].(string))
+	}
+
+	return pools, nil
+}
+
+// Get the distinct drivers of all storage pools.
+func dbStoragePoolsGetDrivers(db *sql.DB) ([]string, error) {
+	var poolDriver string
+	query := "SELECT DISTINCT driver FROM storage_pools"
+	inargs := []interface{}{}
+	outargs := []interface{}{poolDriver}
+
+	result, err := dbQueryScan(db, query, inargs, outargs)
+	if err != nil {
+		return []string{}, err
+	}
+
+	if len(result) == 0 {
+		return []string{}, NoSuchObjectError
+	}
+
+	drivers := []string{}
+	for _, driver := range result {
+		drivers = append(drivers, driver[0].(string))
+	}
+
+	return drivers, nil
+}
+
+// Get id of a single storage pool.
+func dbStoragePoolGetID(db *sql.DB, poolName string) (int64, error) {
+	poolID := int64(-1)
+	query := "SELECT id FROM storage_pools WHERE pool_name=?"
+	inargs := []interface{}{poolName}
+	outargs := []interface{}{&poolID}
+
+	err := dbQueryRowScan(db, query, inargs, outargs)
+	if err != nil {
+		if err == sql.ErrNoRows {
+			return -1, NoSuchObjectError
+		}
+
+		return -1, err
+	}
+
+	return poolID, nil
+}
+
+// Get a single storage pool.
+func dbStoragePoolGet(db *sql.DB, poolName string) (int64, *api.StoragePool, error) {
+	var poolDriver string
+	poolID := int64(-1)
+	query := "SELECT id, driver FROM storage_pools WHERE pool_name=?"
+	inargs := []interface{}{poolName}
+	outargs := []interface{}{&poolID, &poolDriver}
+
+	err := dbQueryRowScan(db, query, inargs, outargs)
+	if err != nil {
+		if err == sql.ErrNoRows {
+			return -1, nil, NoSuchObjectError
+		}
+
+		return -1, nil, err
+	}
+
+	config, err := dbStoragePoolConfigGet(db, poolID)
+	if err != nil {
+		return -1, nil, err
+	}
+
+	storagePool := api.StoragePool{
+		PoolName:   poolName,
+		PoolDriver: poolDriver,
+	}
+	storagePool.PoolConfig = config
+
+	return poolID, &storagePool, nil
+}
+
+// Get config of a storage pool.
+func dbStoragePoolConfigGet(db *sql.DB, poolID int64) (map[string]string, error) {
+	var key, value string
+	query := "SELECT key, value FROM storage_pools_config WHERE storage_pool_id=?"
+	inargs := []interface{}{poolID}
+	outargs := []interface{}{key, value}
+
+	results, err := dbQueryScan(db, query, inargs, outargs)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(results) == 0 {
+		/*
+		 * If we didn't get any rows here, let's check to make sure the
+		 * storage pool really exists; if it doesn't, let's send back a
+		 * 404.
+		 */
+		query := "SELECT id FROM storage_pools WHERE id=?"
+		var r int
+		results, err := dbQueryScan(db, query, []interface{}{poolID}, []interface{}{r})
+		if err != nil {
+			return nil, err
+		}
+
+		if len(results) == 0 {
+			return nil, NoSuchObjectError
+		}
+	}
+
+	config := map[string]string{}
+
+	for _, r := range results {
+		key = r[0].(string)
+		value = r[1].(string)
+
+		config[key] = value
+	}
+
+	return config, nil
+}
+
+// Create new storage pool.
+func dbStoragePoolCreate(db *sql.DB, poolName string, poolDriver string, poolConfig map[string]string) (int64, error) {
+	tx, err := dbBegin(db)
+	if err != nil {
+		return -1, err
+	}
+
+	result, err := tx.Exec("INSERT INTO storage_pools (pool_name, driver) VALUES (?, ?)", poolName, poolDriver)
+	if err != nil {
+		tx.Rollback()
+		return -1, err
+	}
+
+	id, err := result.LastInsertId()
+	if err != nil {
+		tx.Rollback()
+		return -1, err
+	}
+
+	err = dbStoragePoolConfigAdd(tx, id, poolConfig)
+	if err != nil {
+		tx.Rollback()
+		return -1, err
+	}
+
+	err = txCommit(tx)
+	if err != nil {
+		return -1, err
+	}
+
+	return id, nil
+}
+
+// Add new storage pool config.
+func dbStoragePoolConfigAdd(tx *sql.Tx, poolID int64, poolConfig map[string]string) error {
+	str := "INSERT INTO storage_pools_config (storage_pool_id, key, value) VALUES(?, ?, ?)"
+	stmt, err := tx.Prepare(str)
+	if err != nil {
+		return err
+	}
+	defer stmt.Close()
+
+	for k, v := range poolConfig {
+		if v == "" {
+			continue
+		}
+
+		_, err = stmt.Exec(poolID, k, v)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// Update storage pool.
+func dbStoragePoolUpdate(db *sql.DB, poolName string, poolConfig map[string]string) error {
+	poolID, _, err := dbStoragePoolGet(db, poolName)
+	if err != nil {
+		return err
+	}
+
+	tx, err := dbBegin(db)
+	if err != nil {
+		return err
+	}
+
+	err = dbStoragePoolConfigClear(tx, poolID)
+	if err != nil {
+		tx.Rollback()
+		return err
+	}
+
+	err = dbStoragePoolConfigAdd(tx, poolID, poolConfig)
+	if err != nil {
+		tx.Rollback()
+		return err
+	}
+
+	return txCommit(tx)
+}
+
+// Delete storage pool config.
+func dbStoragePoolConfigClear(tx *sql.Tx, poolID int64) error {
+	_, err := tx.Exec("DELETE FROM storage_pools_config WHERE storage_pool_id=?", poolID)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Delete storage pool.
+func dbStoragePoolDelete(db *sql.DB, poolName string) error {
+	poolID, _, err := dbStoragePoolGet(db, poolName)
+	if err != nil {
+		return err
+	}
+
+	tx, err := dbBegin(db)
+	if err != nil {
+		return err
+	}
+
+	_, err = tx.Exec("DELETE FROM storage_pools WHERE id=?", poolID)
+	if err != nil {
+		tx.Rollback()
+		return err
+	}
+
+	err = dbStoragePoolConfigClear(tx, poolID)
+	if err != nil {
+		tx.Rollback()
+		return err
+	}
+
+	return txCommit(tx)
+}
+
+// Rename storage pool.
+func dbStoragePoolRename(db *sql.DB, oldPoolName string, newPoolName string) error {
+	poolID, err := dbStoragePoolGetID(db, oldPoolName)
+	if err != nil {
+		return err
+	}
+
+	tx, err := dbBegin(db)
+	if err != nil {
+		return err
+	}
+
+	_, err = tx.Exec("UPDATE storage_pools SET pool_name=? WHERE id=?", newPoolName, poolID)
+	if err != nil {
+		tx.Rollback()
+		return err
+	}
+
+	return txCommit(tx)
+}
+
+// Get the names of all storage volumes attached to a given storage pool.
+func dbStoragePoolVolumesGetNames(db *sql.DB, poolID int64) ([]string, error) {
+	var volumeName string
+	query := "SELECT DISTINCT volume_name FROM storage_volumes WHERE storage_pool_id=?"
+	inargs := []interface{}{poolID}
+	outargs := []interface{}{volumeName}
+
+	result, err := dbQueryScan(db, query, inargs, outargs)
+	if err != nil {
+		return []string{}, err
+	}
+
+	if len(result) == 0 {
+		return []string{}, NoSuchObjectError
+	}
+
+	response := []string{}
+	for _, r := range result {
+		response = append(response, r[0].(string))
+	}
+
+	return response, nil
+}
+
+// Get all storage volumes attached to a given storage pool.
+func dbStoragePoolVolumesGet(db *sql.DB, poolID int64) ([]*api.StorageVolume, error) {
+	// Get all storage volumes of all types attached to a given storage
+	// pool.
+	result := []*api.StorageVolume{}
+	for _, volumeType := range supportedVolumeTypes {
+		volumeNames, err := dbStoragePoolVolumesGetType(db, volumeType, poolID)
+		if err != nil && err != sql.ErrNoRows {
+			return nil, err
+		}
+		for _, volumeName := range volumeNames {
+			_, volume, err := dbStoragePoolVolumeGetType(db, volumeName, volumeType, poolID)
+			if err != nil {
+				return nil, err
+			}
+			result = append(result, volume)
+		}
+	}
+
+	if len(result) == 0 {
+		return result, NoSuchObjectError
+	}
+
+	return result, nil
+}
+
+// Get the names of all storage volumes attached to a given storage pool of a
+// given volume type.
+func dbStoragePoolVolumesGetType(db *sql.DB, volumeType int, poolID int64) ([]string, error) {
+	var volumeName string
+	query := "SELECT volume_name FROM storage_volumes WHERE storage_pool_id=? AND storage_volume_type=?"
+	inargs := []interface{}{poolID, volumeType}
+	outargs := []interface{}{volumeName}
+
+	result, err := dbQueryScan(db, query, inargs, outargs)
+	if err != nil {
+		return []string{}, err
+	}
+
+	response := []string{}
+	for _, r := range result {
+		response = append(response, r[0].(string))
+	}
+
+	return response, nil
+}
+
+// Get a single storage volume attached to a given storage pool of a given type.
+func dbStoragePoolVolumeGetType(db *sql.DB, volumeName string, volumeType int, poolID int64) (int64, *api.StorageVolume, error) {
+	volumeID, err := dbStoragePoolVolumeGetTypeID(db, volumeName, volumeType, poolID)
+	if err != nil {
+		return -1, nil, err
+	}
+
+	volumeConfig, err := dbStorageVolumeConfigGet(db, volumeID)
+	if err != nil {
+		return -1, nil, err
+	}
+
+	volumeTypeName, err := storagePoolVolumeTypeToName(volumeType)
+	if err != nil {
+		return -1, nil, err
+	}
+
+	storageVolume := api.StorageVolume{
+		VolumeType: volumeTypeName,
+	}
+	storageVolume.VolumeName = volumeName
+	storageVolume.VolumeConfig = volumeConfig
+
+	return volumeID, &storageVolume, nil
+}
+
+// Update storage volume attached to a given storage pool.
+func dbStoragePoolVolumeUpdate(db *sql.DB, volumeName string, volumeType int, poolID int64, volumeConfig map[string]string) error {
+	volumeID, _, err := dbStoragePoolVolumeGetType(db, volumeName, volumeType, poolID)
+	if err != nil {
+		return err
+	}
+
+	tx, err := dbBegin(db)
+	if err != nil {
+		return err
+	}
+
+	err = dbStorageVolumeConfigClear(tx, volumeID)
+	if err != nil {
+		tx.Rollback()
+		return err
+	}
+
+	err = dbStorageVolumeConfigAdd(tx, volumeID, volumeConfig)
+	if err != nil {
+		tx.Rollback()
+		return err
+	}
+
+	return txCommit(tx)
+}
+
+// Delete storage volume attached to a given storage pool.
+func dbStoragePoolVolumeDelete(db *sql.DB, volumeName string, volumeType int, poolID int64) error {
+	volumeID, _, err := dbStoragePoolVolumeGetType(db, volumeName, volumeType, poolID)
+	if err != nil {
+		return err
+	}
+
+	tx, err := dbBegin(db)
+	if err != nil {
+		return err
+	}
+
+	_, err = tx.Exec("DELETE FROM storage_volumes WHERE id=? AND storage_volume_type=?", volumeID, volumeType)
+	if err != nil {
+		tx.Rollback()
+		return err
+	}
+
+	err = dbStorageVolumeConfigClear(tx, volumeID)
+	if err != nil {
+		tx.Rollback()
+		return err
+	}
+
+	return txCommit(tx)
+}
+
+// Rename storage volume attached to a given storage pool.
+func dbStoragePoolVolumeRename(db *sql.DB, oldVolumeName string, newVolumeName string, volumeType int, poolID int64) error {
+	volumeID, _, err := dbStoragePoolVolumeGetType(db, oldVolumeName, volumeType, poolID)
+	if err != nil {
+		return err
+	}
+
+	tx, err := dbBegin(db)
+	if err != nil {
+		return err
+	}
+
+	_, err = tx.Exec("UPDATE storage_volumes SET volume_name=? WHERE id=? AND storage_volume_type=?", newVolumeName, volumeID, volumeType)
+	if err != nil {
+		tx.Rollback()
+		return err
+	}
+
+	return txCommit(tx)
+}
+
+// Create new storage volume attached to a given storage pool.
+func dbStoragePoolVolumeCreate(db *sql.DB, volumeName string, volumeType int, poolID int64, volumeConfig map[string]string) (int64, error) {
+	tx, err := dbBegin(db)
+	if err != nil {
+		return -1, err
+	}
+
+	result, err := tx.Exec("INSERT INTO storage_volumes (storage_pool_id, storage_volume_type, volume_name) VALUES (?, ?, ?)", poolID, volumeType, volumeName)
+	if err != nil {
+		tx.Rollback()
+		return -1, err
+	}
+
+	volumeID, err := result.LastInsertId()
+	if err != nil {
+		tx.Rollback()
+		return -1, err
+	}
+
+	err = dbStorageVolumeConfigAdd(tx, volumeID, volumeConfig)
+	if err != nil {
+		tx.Rollback()
+		return -1, err
+	}
+
+	err = txCommit(tx)
+	if err != nil {
+		return -1, err
+	}
+
+	return volumeID, nil
+}
+
+// Get ID of a storage volume on a given storage pool of a given storage volume
+// type.
+func dbStoragePoolVolumeGetTypeID(db *sql.DB, volumeName string, volumeType int, poolID int64) (int64, error) {
+	volumeID := int64(-1)
+	query := `SELECT storage_volumes.id
+FROM storage_volumes
+JOIN storage_pools
+ON storage_volumes.storage_pool_id = storage_pools.id
+WHERE storage_volumes.storage_pool_id=?
+AND storage_volumes.volume_name=? AND storage_volume_type=?`
+	inargs := []interface{}{poolID, volumeName, volumeType}
+	outargs := []interface{}{&volumeID}
+
+	err := dbQueryRowScan(db, query, inargs, outargs)
+	if err != nil {
+		if err == sql.ErrNoRows {
+			return -1, NoSuchObjectError
+		}
+
+		return -1, err
+	}
+
+	return volumeID, nil
+}
diff --git a/lxd/db_storage_volumes.go b/lxd/db_storage_volumes.go
new file mode 100644
index 0000000..8d15472
--- /dev/null
+++ b/lxd/db_storage_volumes.go
@@ -0,0 +1,99 @@
+package main
+
+import (
+	"database/sql"
+
+	_ "github.com/mattn/go-sqlite3"
+)
+
+// Get all storage volumes.
+func dbStorageVolumes(db *sql.DB) ([]string, error) {
+	var name string
+	query := "SELECT volume_name FROM storage_volumes"
+	inargs := []interface{}{}
+	outargs := []interface{}{name}
+
+	result, err := dbQueryScan(db, query, inargs, outargs)
+	if err != nil {
+		return []string{}, err
+	}
+
+	volumes := []string{}
+	for _, r := range result {
+		volumes = append(volumes, r[0].(string))
+	}
+
+	return volumes, nil
+}
+
+// Get config of a storage volume.
+func dbStorageVolumeConfigGet(db *sql.DB, volumeID int64) (map[string]string, error) {
+	var key, value string
+	query := "SELECT key, value FROM storage_volumes_config WHERE storage_volume_id=?"
+	inargs := []interface{}{volumeID}
+	outargs := []interface{}{key, value}
+
+	results, err := dbQueryScan(db, query, inargs, outargs)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(results) == 0 {
+		/*
+		 * If we didn't get any rows here, let's check to make sure the
+		 * storage volume really exists; if it doesn't, let's send back
+		 * a 404.
+		 */
+		query := "SELECT id FROM storage_volumes WHERE id=?"
+		var r int
+		results, err := dbQueryScan(db, query, []interface{}{volumeID}, []interface{}{r})
+		if err != nil {
+			return nil, err
+		}
+
+		if len(results) == 0 {
+			return nil, NoSuchObjectError
+		}
+	}
+
+	config := map[string]string{}
+
+	for _, r := range results {
+		key = r[0].(string)
+		value = r[1].(string)
+
+		config[key] = value
+	}
+
+	return config, nil
+}
+
+// Add new storage volume config into database.
+func dbStorageVolumeConfigAdd(tx *sql.Tx, volumeID int64, volumeConfig map[string]string) error {
+	str := "INSERT INTO storage_volumes_config (storage_volume_id, key, value) VALUES(?, ?, ?)"
+	stmt, err := tx.Prepare(str)
+	if err != nil {
+		return err
+	}
+	defer stmt.Close()
+
+	for k, v := range volumeConfig {
+		if v == "" {
+			continue
+		}
+
+		_, err = stmt.Exec(volumeID, k, v)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// Delete storage volume config.
+func dbStorageVolumeConfigClear(tx *sql.Tx, volumeID int64) error {
+	_, err := tx.Exec("DELETE FROM storage_volumes_config WHERE storage_volume_id=?", volumeID)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/lxd/db_update.go b/lxd/db_update.go
index a22d35c..89f7f01 100644
--- a/lxd/db_update.go
+++ b/lxd/db_update.go
@@ -68,6 +68,7 @@ var dbUpdates = []dbUpdate{
 	{version: 32, run: dbUpdateFromV31},
 	{version: 33, run: dbUpdateFromV32},
 	{version: 34, run: dbUpdateFromV33},
+	{version: 35, run: dbUpdateFromV34},
 }
 
 type dbUpdate struct {
@@ -124,6 +125,42 @@ func dbUpdatesApplyAll(d *Daemon) error {
 }
 
 // Schema updates begin here
+func dbUpdateFromV34(currentVersion int, version int, d *Daemon) error {
+	stmt := `
+CREATE TABLE IF NOT EXISTS storage_pools (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    pool_name VARCHAR(255) NOT NULL,
+    driver VARCHAR(255) NOT NULL,
+    UNIQUE (pool_name)
+);
+CREATE TABLE IF NOT EXISTS storage_pools_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    storage_pool_id INTEGER NOT NULL,
+    key VARCHAR(255) NOT NULL,
+    value TEXT,
+    UNIQUE (storage_pool_id, key),
+    FOREIGN KEY (storage_pool_id) REFERENCES storage_pools (id) ON DELETE CASCADE
+);
+CREATE TABLE IF NOT EXISTS storage_volumes (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    volume_name VARCHAR(255) NOT NULL,
+    storage_pool_id INTEGER NOT NULL,
+    storage_volume_type INTEGER NOT NULL,
+    UNIQUE (storage_pool_id, volume_name, storage_volume_type),
+    FOREIGN KEY (storage_pool_id) REFERENCES storage_pools (id) ON DELETE CASCADE
+);
+CREATE TABLE IF NOT EXISTS storage_volumes_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    storage_volume_id INTEGER NOT NULL,
+    key VARCHAR(255) NOT NULL,
+    value TEXT,
+    UNIQUE (storage_volume_id, key),
+    FOREIGN KEY (storage_volume_id) REFERENCES storage_volumes (id) ON DELETE CASCADE
+);`
+	_, err := d.db.Exec(stmt)
+	return err
+}
+
 func dbUpdateFromV33(currentVersion int, version int, d *Daemon) error {
 	stmt := `
 CREATE TABLE IF NOT EXISTS networks (

From 9a93f7693527b7f1ecc01012c60624d05dead9d6 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Thu, 5 Jan 2017 12:57:52 +0100
Subject: [PATCH 02/63] lxd: implement new storage api endpoints and types

- /1.0/storage-pools
  - GET: list pools
    - func storagePoolsGet(d *Daemon, r *http.Request) Response
  - POST: create a new pool
    - func storagePoolsPost(d *Daemon, r *http.Request) Response

- /1.0/storage-pools/{pool_name}
  - GET: list pool properties
    - func storagePoolGet(d *Daemon, r *http.Request) Response
  - POST: rename pool
    - func storagePoolPost(d *Daemon, r *http.Request) Response
  - PUT: replace pool properties
    - func storagePoolPut(d *Daemon, r *http.Request) Response
  - PATCH: change pool properties
    - func storagePoolPatch(d *Daemon, r *http.Request) Response
  - DELETE: remove pool
    - func storagePoolDelete(d *Daemon, r *http.Request) Response

- /1.0/storage-pools/{pool_name}/volumes
  - GET: list volumes
    - func storagePoolVolumesGet(d *Daemon, r *http.Request) Response

- /1.0/storage-pools/{pool_name}/volumes/{volume_type}
  - GET: list volumes of a given {volume_type}
    - func storagePoolVolumesTypeGet(d *Daemon, r *http.Request) Response
  - POST: create a new volume of a given {volume_type}
    - func storagePoolVolumesTypePost(d *Daemon, r *http.Request) Response

- /1.0/storage-pools/{pool_name}/volumes/{volume_type}/{volume_name}
  - GET: list volume properties
    - func storagePoolVolumeTypeGet(d *Daemon, r *http.Request) Response
  - POST: rename volume (or migrate it)
    - func storagePoolVolumeTypePost(d *Daemon, r *http.Request) Response
  - PUT: replace volume properties
    - func storagePoolVolumeTypePut(d *Daemon, r *http.Request) Response
  - PATCH: change volume properties
    - func storagePoolVolumeTypePatch(d *Daemon, r *http.Request) Response
  - DELETE: remove volume
    - func storagePoolVolumeTypeDelete(d *Daemon, r *http.Request) Response

Storage pools and volumes are handled with various structs in
shared/api/storage.go that follow our new general API type layout.
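
As a usage illustration (not part of this patch), here is a minimal Go
client sketch that lists storage pools through the endpoints above over
LXD's local unix socket. The socket path is the usual default and an
assumption here:

package main

import (
	"context"
	"fmt"
	"io/ioutil"
	"log"
	"net"
	"net/http"
)

func main() {
	// Speak HTTP over LXD's unix socket (path assumed).
	client := &http.Client{
		Transport: &http.Transport{
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				return (&net.Dialer{}).DialContext(ctx, "unix", "/var/lib/lxd/unix.socket")
			},
		},
	}

	// The host part of the URL is ignored when dialing a unix socket.
	resp, err := client.Get("http://lxd/1.0/storage-pools")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}

	// Without ?recursion=1 this returns a sync response whose metadata is
	// a list of URLs like "/1.0/storage-pools/<name>".
	fmt.Println(string(body))
}

Passing ?recursion=1 returns the full pool objects instead, matching the
recursion handling in storagePoolsGet below.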

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/storage_pools.go          | 307 +++++++++++++++++++++++++
 lxd/storage_pools_config.go   | 162 ++++++++++++++
 lxd/storage_pools_utils.go    |  63 ++++++
 lxd/storage_utils.go          |  54 +++++
 lxd/storage_volumes.go        | 507 ++++++++++++++++++++++++++++++++++++++++++
 lxd/storage_volumes_config.go | 125 +++++++++++
 lxd/storage_volumes_utils.go  | 188 ++++++++++++++++
 shared/api/storage.go         |  37 +++
 8 files changed, 1443 insertions(+)
 create mode 100644 lxd/storage_pools.go
 create mode 100644 lxd/storage_pools_config.go
 create mode 100644 lxd/storage_pools_utils.go
 create mode 100644 lxd/storage_utils.go
 create mode 100644 lxd/storage_volumes.go
 create mode 100644 lxd/storage_volumes_config.go
 create mode 100644 lxd/storage_volumes_utils.go
 create mode 100644 shared/api/storage.go

diff --git a/lxd/storage_pools.go b/lxd/storage_pools.go
new file mode 100644
index 0000000..fc86f0a
--- /dev/null
+++ b/lxd/storage_pools.go
@@ -0,0 +1,307 @@
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"strconv"
+
+	"github.com/gorilla/mux"
+	"github.com/lxc/lxd/shared/api"
+	"github.com/lxc/lxd/shared/version"
+)
+
+// /1.0/storage-pools
+// List all storage pools.
+func storagePoolsGet(d *Daemon, r *http.Request) Response {
+	recursionStr := r.FormValue("recursion")
+	recursion, err := strconv.Atoi(recursionStr)
+	if err != nil {
+		recursion = 0
+	}
+
+	pools, err := dbStoragePools(d.db)
+	if err != nil && err != NoSuchObjectError {
+		return InternalError(err)
+	}
+
+	resultString := []string{}
+	resultMap := []api.StoragePool{}
+	for _, pool := range pools {
+		if recursion == 0 {
+			resultString = append(resultString, fmt.Sprintf("/%s/storage-pools/%s", version.APIVersion, pool))
+		} else {
+			_, pl, err := dbStoragePoolGet(d.db, pool)
+			if err != nil {
+				continue
+			}
+			resultMap = append(resultMap, *pl)
+		}
+	}
+
+	if recursion == 0 {
+		return SyncResponse(true, resultString)
+	}
+
+	return SyncResponse(true, resultMap)
+}
+
+// /1.0/storage-pools
+// Create a storage pool.
+func storagePoolsPost(d *Daemon, r *http.Request) Response {
+	req := api.StoragePool{}
+
+	// Parse the request.
+	err := json.NewDecoder(r.Body).Decode(&req)
+	if err != nil {
+		return BadRequest(err)
+	}
+
+	// Sanity checks.
+	if req.PoolName == "" {
+		return BadRequest(fmt.Errorf("No name provided"))
+	}
+
+	// Check if the storage pool name is valid.
+	err = storageValidName(req.PoolName)
+	if err != nil {
+		return BadRequest(err)
+	}
+
+	// Check that the storage pool does not already exist.
+	_, err = dbStoragePoolGetID(d.db, req.PoolName)
+	if err != nil {
+		if err != NoSuchObjectError {
+			return InternalError(err)
+		}
+	}
+
+	// Make sure that we don't pass a nil to the next function.
+	if req.PoolConfig == nil {
+		req.PoolConfig = map[string]string{}
+	}
+
+	// Validate the requested storage pool configuration.
+	err = storagePoolValidateConfig(req.PoolName, req.PoolDriver, req.PoolConfig)
+	if err != nil {
+		return BadRequest(err)
+	}
+
+	// Create the database entry for the storage pool.
+	_, err = dbStoragePoolCreate(d.db, req.PoolName, req.PoolDriver, req.PoolConfig)
+	if err != nil {
+		return InternalError(fmt.Errorf("Error inserting %s into database: %s", req.PoolName, err))
+	}
+
+	// Define a function which reverts everything.  Defer this function
+	// so that it doesn't need to be explicitly called in every failing
+	// return path. Track whether or not we want to undo the changes
+	// using a closure.
+	tryUndo := true
+	defer func() {
+		if tryUndo {
+			dbStoragePoolDelete(d.db, req.PoolName)
+		}
+	}()
+
+	s, err := storagePoolInit(d, req.PoolName)
+	if err != nil {
+		return InternalError(err)
+	}
+
+	err = s.StoragePoolCreate()
+	if err != nil {
+		return InternalError(err)
+	}
+	defer func() {
+		if tryUndo {
+			s.StoragePoolDelete()
+		}
+	}()
+
+	// In case the storage pool config was changed during pool creation, we
+	// need to update the database to reflect this change. This can happen,
+	// for example, when we create a loop file image: we append ".img" to
+	// the path the user gave us and update the config in the storage
+	// callback. So diff the config here to see if something like this has
+	// happened.
+	postCreateConfig := s.GetStoragePoolWritable().PoolConfig
+	configDiff, _ := storageConfigDiff(req.PoolConfig, postCreateConfig)
+	if len(configDiff) > 0 {
+		// Create the database entry for the storage pool.
+		err = dbStoragePoolUpdate(d.db, req.PoolName, postCreateConfig)
+		if err != nil {
+			return InternalError(fmt.Errorf("Error inserting %s into database: %s", req.PoolName, err))
+		}
+	}
+
+	// Success, update the closure to mark that the changes should be kept.
+	tryUndo = false
+
+	return SyncResponseLocation(true, nil, fmt.Sprintf("/%s/storage-pools/%s", version.APIVersion, req.PoolName))
+}
+
+var storagePoolsCmd = Command{name: "storage-pools", get: storagePoolsGet, post: storagePoolsPost}
+
+// /1.0/storage-pools/{pool_name}
+// Get a single storage pool.
+func storagePoolGet(d *Daemon, r *http.Request) Response {
+	poolName := mux.Vars(r)["pool_name"]
+	if poolName == "" {
+		return BadRequest(fmt.Errorf("You must provide a pool name."))
+	}
+
+	// Get the existing storage pool.
+	_, pool, err := dbStoragePoolGet(d.db, poolName)
+	if err != nil {
+		return SmartError(err)
+	}
+
+	etag := []interface{}{pool.PoolName, pool.PoolUsedBy, pool.PoolConfig}
+
+	return SyncResponseETag(true, &pool, etag)
+}
+
+// /1.0/storage-pools/{pool_name}
+// Rename storage pool.
+func storagePoolPost(d *Daemon, r *http.Request) Response {
+	return BadRequest(fmt.Errorf("Storage pools can currently not be renamed."))
+}
+
+// /1.0/storage-pools/{pool_name}
+// Replace pool properties.
+func storagePoolPut(d *Daemon, r *http.Request) Response {
+	poolName := mux.Vars(r)["pool_name"]
+	if poolName == "" {
+		return BadRequest(fmt.Errorf("You must provide a pool name."))
+	}
+
+	// Get the existing storage pool.
+	_, dbInfo, err := dbStoragePoolGet(d.db, poolName)
+	if err != nil {
+		return SmartError(err)
+	}
+
+	// Validate the ETag
+	etag := []interface{}{dbInfo.PoolName, dbInfo.PoolUsedBy, dbInfo.PoolConfig}
+
+	err = etagCheck(r, etag)
+	if err != nil {
+		return PreconditionFailed(err)
+	}
+
+	req := api.StoragePool{}
+	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+		return BadRequest(err)
+	}
+
+	// Validate the configuration
+	err = storagePoolValidateConfig(poolName, req.PoolDriver, req.PoolConfig)
+	if err != nil {
+		return BadRequest(err)
+	}
+
+	err = storagePoolUpdate(d, poolName, req.PoolConfig)
+	if err != nil {
+		return InternalError(err)
+	}
+
+	return EmptySyncResponse
+}
+
+// /1.0/storage-pools/{pool_name}
+// Change pool properties.
+func storagePoolPatch(d *Daemon, r *http.Request) Response {
+	poolName := mux.Vars(r)["pool_name"]
+	if poolName == "" {
+		return BadRequest(fmt.Errorf("You must provide a pool name."))
+	}
+
+	// Get the existing storage pool
+	_, dbInfo, err := dbStoragePoolGet(d.db, poolName)
+	if err != nil {
+		return SmartError(err)
+	}
+
+	// Validate the ETag
+	etag := []interface{}{dbInfo.PoolName, dbInfo.PoolUsedBy, dbInfo.PoolConfig}
+
+	err = etagCheck(r, etag)
+	if err != nil {
+		return PreconditionFailed(err)
+	}
+
+	req := api.StoragePool{}
+	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+		return BadRequest(err)
+	}
+
+	// Config stacking
+	if req.PoolConfig == nil {
+		req.PoolConfig = map[string]string{}
+	}
+
+	for k, v := range dbInfo.PoolConfig {
+		_, ok := req.PoolConfig[k]
+		if !ok {
+			req.PoolConfig[k] = v
+		}
+	}
+
+	// Validate the configuration
+	err = storagePoolValidateConfig(poolName, req.PoolDriver, req.PoolConfig)
+	if err != nil {
+		return BadRequest(err)
+	}
+
+	err = storagePoolUpdate(d, poolName, req.PoolConfig)
+	if err != nil {
+		return InternalError(fmt.Errorf("Failed to update the storage pool configuration."))
+	}
+
+	return EmptySyncResponse
+}
+
+// /1.0/storage-pools/{pool_name}
+// Delete storage pool.
+func storagePoolDelete(d *Daemon, r *http.Request) Response {
+	poolName := mux.Vars(r)["pool_name"]
+	if poolName == "" {
+		return BadRequest(fmt.Errorf("You must provide a pool name."))
+	}
+
+	poolID, err := dbStoragePoolGetID(d.db, poolName)
+	if err != nil {
+		return NotFound
+	}
+
+	// Check if the storage pool has any volumes associated with it, if so
+	// error out.
+	volumes, err := dbStoragePoolVolumesGetNames(d.db, poolID)
+	if err != nil && err != NoSuchObjectError {
+		return InternalError(err)
+	}
+
+	if len(volumes) > 0 {
+		return BadRequest(fmt.Errorf("Storage pool \"%s\" has volumes attached to it.", poolName))
+	}
+
+	s, err := storagePoolInit(d, poolName)
+	if err != nil {
+		return InternalError(err)
+	}
+
+	err = s.StoragePoolDelete()
+	if err != nil {
+		return InternalError(err)
+	}
+
+	err = dbStoragePoolDelete(d.db, poolName)
+	if err != nil {
+		return InternalError(err)
+	}
+
+	return EmptySyncResponse
+}
+
+var storagePoolCmd = Command{name: "storage-pools/{pool_name}", get: storagePoolGet, post: storagePoolPost, put: storagePoolPut, patch: storagePoolPatch, delete: storagePoolDelete}
diff --git a/lxd/storage_pools_config.go b/lxd/storage_pools_config.go
new file mode 100644
index 0000000..87e36b6
--- /dev/null
+++ b/lxd/storage_pools_config.go
@@ -0,0 +1,162 @@
+package main
+
+import (
+	"fmt"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"syscall"
+
+	"github.com/lxc/lxd/shared"
+)
+
+var storagePoolConfigKeys = map[string]func(value string) error{
+	"source": shared.IsAny,
+	"size":   shared.IsAny,
+	"volume.block.mount_options":  shared.IsAny,
+	"volume.block.filesystem":     shared.IsAny,
+	"volume.size":                 shared.IsAny,
+	"volume.zfs.use_refquota":     shared.IsBool,
+	"volume.zfs.remove_snapshots": shared.IsBool,
+	"volume.lvm.thinpool_name":    shared.IsAny,
+	"zfs.pool_name":               shared.IsAny,
+}
+
+func storagePoolValidateConfig(name string, driver string, config map[string]string) error {
+	err := shared.IsOneOf(driver, supportedStorageTypes)
+	if err != nil {
+		return err
+	}
+
+	if config["source"] == "" {
+		if driver == "dir" {
+			config["source"] = filepath.Join(shared.VarPath("storage-pools"), name)
+		} else {
+			config["source"] = filepath.Join(shared.VarPath("disks"), name)
+		}
+	}
+
+	for key, val := range config {
+		// User keys are not validated.
+		if strings.HasPrefix(key, "user.") {
+			continue
+		}
+
+		// Validate storage pool config keys.
+		validator, ok := storagePoolConfigKeys[key]
+		if !ok {
+			return fmt.Errorf("Invalid storage pool configuration key: %s", key)
+		}
+
+		err := validator(val)
+		if err != nil {
+			return err
+		}
+
+		if driver != "zfs" || driver == "dir" {
+			if config["volume.zfs.use_refquota"] != "" {
+				return fmt.Errorf("Key volume.zfs.use_refquota cannot be used with non zfs storage pools.")
+			}
+
+			if config["volume.zfs.remove_snapshots"] != "" {
+				return fmt.Errorf("Key volume.zfs.remove_snapshots cannot be used with non zfs storage pools.")
+			}
+
+			if config["zfs.pool_name"] != "" {
+				return fmt.Errorf("Key zfs.pool_name cannot be used with non zfs storage pools.")
+			}
+		}
+
+		if driver == "dir" {
+			if config["size"] != "" {
+				return fmt.Errorf("Key size cannot be used with dir storage pools.")
+			}
+
+			if config["volume.block.mount_options"] != "" {
+				return fmt.Errorf("Key volume.block.mount_options cannot be used with dir storage pools.")
+			}
+
+			if config["volume.block.filesystem"] != "" {
+				return fmt.Errorf("Key volume.block.filesystem cannot be used with dir storage pools.")
+			}
+
+			if config["volume.size"] != "" {
+				return fmt.Errorf("Key volume.size cannot be used with dir storage pools.")
+			}
+		}
+	}
+
+	err = storagePoolFillDefault(name, driver, config)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func storagePoolFillDefault(name string, driver string, config map[string]string) error {
+	if driver != "dir" {
+		var size uint64
+		if config["size"] == "" {
+			st := syscall.Statfs_t{}
+			err := syscall.Statfs(shared.VarPath(), &st)
+			if err != nil {
+				return fmt.Errorf("couldn't statfs %s: %s", shared.VarPath(), err)
+			}
+
+			/* choose 15 GB < x < 100 GB, where x is 20% of the disk size */
+			gb := uint64(1024 * 1024 * 1024)
+			size = uint64(st.Frsize) * st.Blocks / 5
+			if (size / gb) > 100 {
+				size = 100 * gb
+			}
+			if (size / gb) < 15 {
+				size = 15 * gb
+			}
+		} else {
+			sz, err := shared.ParseByteSizeString(config["size"])
+			if err != nil {
+				return err
+			}
+			size = uint64(sz)
+		}
+		config["size"] = strconv.FormatUint(uint64(size), 10)
+	}
+
+	if driver == "zfs" {
+		if val, ok := config["zfs.pool_name"]; !ok || val == "" {
+			config["zfs.pool_name"] = name
+		}
+	}
+
+	if driver == "lvm" {
+		if config["volume.lvm.thinpool_name"] == "" {
+			config["volume.lvm.thinpool_name"] = "LXDThinpool"
+		}
+
+		if config["volume.block.filesystem"] == "" {
+			config["volume.block.filesystem"] = "ext4"
+		}
+
+		if config["volume.block.mount_options"] == "" {
+			config["volume.block.mount_options"] = "discard"
+		}
+	}
+
+	if config["volume.size"] == "" {
+		if driver == "lvm" {
+			sz, err := shared.ParseByteSizeString("10GB")
+			if err != nil {
+				return err
+			}
+			size := uint64(sz)
+			config["volume.size"] = strconv.FormatUint(uint64(size), 10)
+		} else {
+			config["volume.size"] = "0"
+		}
+	}
+
+	return nil
+}
diff --git a/lxd/storage_pools_utils.go b/lxd/storage_pools_utils.go
new file mode 100644
index 0000000..2f4ff4d
--- /dev/null
+++ b/lxd/storage_pools_utils.go
@@ -0,0 +1,63 @@
+package main
+
+import (
+	"github.com/lxc/lxd/shared"
+)
+
+func storagePoolUpdate(d *Daemon, name string, newConfig map[string]string) error {
+	s, err := storagePoolInit(d, name)
+	if err != nil {
+		return err
+	}
+
+	oldWritable := s.GetStoragePoolWritable()
+	newWritable := oldWritable
+
+	// Backup the current state
+	oldConfig := map[string]string{}
+	err = shared.DeepCopy(&oldWritable.PoolConfig, &oldConfig)
+	if err != nil {
+		return err
+	}
+
+	// Define a function which reverts everything.  Defer this function
+	// so that it doesn't need to be explicitly called in every failing
+	// return path. Track whether or not we want to undo the changes
+	// using a closure.
+	undoChanges := true
+	defer func() {
+		if undoChanges {
+			s.SetStoragePoolWritable(&oldWritable)
+		}
+	}()
+
+	changedConfig, userOnly := storageConfigDiff(oldConfig, newConfig)
+	// Skip on no change
+	if len(changedConfig) == 0 {
+		return nil
+	}
+
+	// Update the storage pool
+	if !userOnly {
+		err = s.StoragePoolUpdate(changedConfig)
+		if err != nil {
+			return err
+		}
+	}
+
+	newWritable.PoolConfig = newConfig
+
+	// Apply the new configuration
+	s.SetStoragePoolWritable(&newWritable)
+
+	// Update the database
+	err = dbStoragePoolUpdate(d.db, name, newConfig)
+	if err != nil {
+		return err
+	}
+
+	// Success, update the closure to mark that the changes should be kept.
+	undoChanges = false
+
+	return nil
+}
diff --git a/lxd/storage_utils.go b/lxd/storage_utils.go
new file mode 100644
index 0000000..028025a
--- /dev/null
+++ b/lxd/storage_utils.go
@@ -0,0 +1,54 @@
+package main
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+
+	"github.com/lxc/lxd/shared"
+)
+
+func storageValidName(value string) error {
+	// Validate the character set
+	match, _ := regexp.MatchString("^[-a-zA-Z0-9]*$", value)
+	if !match {
+		return fmt.Errorf("Interface name contains invalid characters")
+	}
+
+	return nil
+}
+
+func storageConfigDiff(oldConfig map[string]string, newConfig map[string]string) ([]string, bool) {
+	changedConfig := []string{}
+	userOnly := true
+	for key := range oldConfig {
+		if oldConfig[key] != newConfig[key] {
+			if !strings.HasPrefix(key, "user.") {
+				userOnly = false
+			}
+
+			if !shared.StringInSlice(key, changedConfig) {
+				changedConfig = append(changedConfig, key)
+			}
+		}
+	}
+
+	for key := range newConfig {
+		if oldConfig[key] != newConfig[key] {
+			if !strings.HasPrefix(key, "user.") {
+				userOnly = false
+			}
+
+			if !shared.StringInSlice(key, changedConfig) {
+				changedConfig = append(changedConfig, key)
+			}
+		}
+	}
+
+	// Skip on no change
+	if len(changedConfig) == 0 {
+		return nil, false
+	}
+
+	return changedConfig, userOnly
+}
diff --git a/lxd/storage_volumes.go b/lxd/storage_volumes.go
new file mode 100644
index 0000000..6bc54da
--- /dev/null
+++ b/lxd/storage_volumes.go
@@ -0,0 +1,507 @@
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"strconv"
+
+	"github.com/gorilla/mux"
+	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/api"
+	"github.com/lxc/lxd/shared/version"
+)
+
+// /1.0/storage-pools/{pool_name}/volumes
+// List all storage volumes attached to a given storage pool.
+func storagePoolVolumesGet(d *Daemon, r *http.Request) Response {
+	poolName := mux.Vars(r)["pool_name"]
+	if poolName == "" {
+		return BadRequest(fmt.Errorf("You must provide the name of the pool the volume belongs to."))
+	}
+
+	recursionStr := r.FormValue("recursion")
+	recursion, err := strconv.Atoi(recursionStr)
+	if err != nil {
+		recursion = 0
+	}
+
+	// Retrieve ID of the storage pool (and check if the storage pool
+	// exists).
+	poolID, err := dbStoragePoolGetID(d.db, poolName)
+	if err != nil {
+		if err == NoSuchObjectError {
+			return BadRequest(fmt.Errorf("The storage pool does not exist."))
+		}
+		return InternalError(err)
+	}
+
+	// Get all volumes currently attached to the storage pool by ID of the
+	// pool.
+	volumes, err := dbStoragePoolVolumesGet(d.db, poolID)
+	if err != nil && err != NoSuchObjectError {
+		return InternalError(err)
+	}
+
+	resultString := []string{}
+	if recursion == 0 {
+		for _, volume := range volumes {
+			apiEndpoint, err := storagePoolVolumeTypeNameToApiEndpoint(volume.VolumeType)
+			if err != nil {
+				return InternalError(err)
+			}
+			resultString = append(resultString, fmt.Sprintf("/%s/storage-pools/%s/volumes/%s/%s", version.APIVersion, poolName, apiEndpoint, volume.VolumeName))
+		}
+		return SyncResponse(true, resultString)
+	}
+
+	return SyncResponse(true, volumes)
+}
+
+var storagePoolVolumesCmd = Command{name: "storage-pools/{pool_name}/volumes", get: storagePoolVolumesGet}
+
+// /1.0/storage-pools/{pool_name}/volumes/{volume_type}
+// List all storage volumes of a given volume type for a given storage pool.
+func storagePoolVolumesTypeGet(d *Daemon, r *http.Request) Response {
+	// Get the name of the pool the storage volume is supposed to be
+	// attached to.
+	poolName := mux.Vars(r)["pool_name"]
+	if poolName == "" {
+		return BadRequest(fmt.Errorf("You must provide the name of the pool the volume belongs to."))
+	}
+
+	recursionStr := r.FormValue("recursion")
+	recursion, err := strconv.Atoi(recursionStr)
+	if err != nil {
+		recursion = 0
+	}
+
+	// Get the name of the volume type.
+	volumeTypeName := mux.Vars(r)["volume_type"]
+	if volumeTypeName == "" {
+		return BadRequest(fmt.Errorf("You must provide a storage volume type of the storage volume."))
+	}
+	// Convert the volume type name to our internal integer representation.
+	volumeType, err := storagePoolVolumeTypeNameToType(volumeTypeName)
+	if err != nil {
+		return BadRequest(err)
+	}
+	// Check that the storage volume type is valid.
+	if !shared.IntInSlice(volumeType, supportedVolumeTypes) {
+		return BadRequest(fmt.Errorf("Invalid storage volume type %s.", volumeTypeName))
+	}
+
+	// Retrieve ID of the storage pool (and check if the storage pool
+	// exists).
+	poolID, err := dbStoragePoolGetID(d.db, poolName)
+	if err != nil {
+		if err == NoSuchObjectError {
+			return BadRequest(fmt.Errorf("The storage pool does not exist."))
+		}
+		return InternalError(err)
+	}
+
+	// Get the names of all storage volumes of a given volume type currently
+	// attached to the storage pool.
+	volumes, err := dbStoragePoolVolumesGetType(d.db, volumeType, poolID)
+	if err != nil {
+		return InternalError(err)
+	}
+
+	resultString := []string{}
+	resultMap := []*api.StorageVolume{}
+	for _, volume := range volumes {
+		if recursion == 0 {
+			apiEndpoint, err := storagePoolVolumeTypeToApiEndpoint(volumeType)
+			if err != nil {
+				return InternalError(err)
+			}
+			resultString = append(resultString, fmt.Sprintf("/%s/storage-pools/%s/volumes/%s/%s", version.APIVersion, poolName, apiEndpoint, volume))
+		} else {
+			_, vol, err := dbStoragePoolVolumeGetType(d.db, volume, volumeType, poolID)
+			if err != nil {
+				continue
+			}
+			resultMap = append(resultMap, vol)
+		}
+	}
+
+	if recursion == 0 {
+		return SyncResponse(true, resultString)
+	}
+
+	return SyncResponse(true, resultMap)
+}
+
+// /1.0/storage-pools/{pool_name}/volumes/{volume_type}
+// Create a storage volume of a given volume type in a given storage pool.
+func storagePoolVolumesTypePost(d *Daemon, r *http.Request) Response {
+	req := api.StorageVolume{}
+
+	// Parse the request.
+	err := json.NewDecoder(r.Body).Decode(&req)
+	if err != nil {
+		return BadRequest(err)
+	}
+
+	// Sanity checks.
+	if req.VolumeName == "" {
+		return BadRequest(fmt.Errorf("No name provided"))
+	}
+
+	// Check that the name of the new storage volume is valid. (For
+	// example, zfs pools cannot contain "/" in their names.)
+	err = storageValidName(req.VolumeName)
+	if err != nil {
+		return BadRequest(err)
+	}
+
+	// Check that the user gave us a storage volume type for the storage
+	// volume we are about to create.
+	if req.VolumeType == "" {
+		return BadRequest(fmt.Errorf("You must provide a storage volume type."))
+	}
+
+	// Convert the volume type name to our internal integer representation.
+	volumeType, err := storagePoolVolumeTypeNameToType(req.VolumeType)
+	if err != nil {
+		return BadRequest(err)
+	}
+
+	// We currently only allow creating storage volumes of type
+	// storagePoolVolumeTypeCustom, so check that nothing else was
+	// requested.
+	if volumeType != storagePoolVolumeTypeCustom {
+		return BadRequest(fmt.Errorf("Creating storage volumes of type %s is currently not allowed.", req.VolumeType))
+	}
+
+	// Check if the user gave us a valid pool name in which the new storage
+	// volume is supposed to be created.
+	poolName := mux.Vars(r)["pool_name"]
+	if poolName == "" {
+		return BadRequest(fmt.Errorf("You must provide the name of the pool the volume belongs to."))
+	}
+
+	// Load storage pool the volume will be attached to.
+	poolID, poolStruct, err := dbStoragePoolGet(d.db, poolName)
+	if err != nil {
+		if err == NoSuchObjectError {
+			return BadRequest(fmt.Errorf("The storage pool does not exist."))
+		}
+		return InternalError(err)
+	}
+
+	// Check that a storage volume of the same storage volume type does not
+	// already exist.
+	volumeID, _ := dbStoragePoolVolumeGetTypeID(d.db, req.VolumeName, volumeType, poolID)
+	if volumeID > 0 {
+		return BadRequest(fmt.Errorf("A storage volume of type %s does already exist.", req.VolumeType))
+	}
+
+	// Make sure that we don't pass a nil to the next function.
+	if req.VolumeConfig == nil {
+		req.VolumeConfig = map[string]string{}
+	}
+
+	// Validate the requested storage volume configuration.
+	err = storageVolumeValidateConfig(poolName, req.VolumeConfig, poolStruct)
+	if err != nil {
+		return BadRequest(err)
+	}
+
+	// Create the database entry for the storage volume.
+	_, err = dbStoragePoolVolumeCreate(d.db, req.VolumeName, volumeType, poolID, req.VolumeConfig)
+	if err != nil {
+		return InternalError(fmt.Errorf("Error inserting %s of type %s into database: %s", poolName, req.VolumeType, err))
+	}
+
+	s, err := storagePoolVolumeInit(d, poolName, req.VolumeName, volumeType)
+	if err != nil {
+		return InternalError(err)
+	}
+
+	// Create storage volume.
+	err = s.StoragePoolVolumeCreate()
+	if err != nil {
+		dbStoragePoolVolumeDelete(d.db, req.VolumeName, volumeType, poolID)
+		return InternalError(err)
+	}
+
+	apiEndpoint, err := storagePoolVolumeTypeToApiEndpoint(volumeType)
+	if err != nil {
+		return InternalError(err)
+	}
+	return SyncResponseLocation(true, nil, fmt.Sprintf("/%s/storage-pools/%s/volumes/%s", version.APIVersion, poolName, apiEndpoint))
+}
+
+var storagePoolVolumesTypeCmd = Command{name: "storage-pools/{pool_name}/volumes/{volume_type}", get: storagePoolVolumesTypeGet, post: storagePoolVolumesTypePost}
+
+// /1.0/storage-pools/{pool_name}/volumes/{volume_type}/{volume_name}
+// Get storage volume of a given volume type on a given storage pool.
+func storagePoolVolumeTypeGet(d *Daemon, r *http.Request) Response {
+	// Get the name of the storage volume.
+	volumeName := mux.Vars(r)["volume_name"]
+	if volumeName == "" {
+		return BadRequest(fmt.Errorf("You must provide a name for the volume."))
+	}
+
+	// Get the name of the storage pool the volume is supposed to be
+	// attached to.
+	poolName := mux.Vars(r)["pool_name"]
+	if poolName == "" {
+		return BadRequest(fmt.Errorf("You must provide the name of the pool the volume belongs to."))
+	}
+
+	// Get the name of the volume type.
+	volumeTypeName := mux.Vars(r)["volume_type"]
+	if volumeTypeName == "" {
+		return BadRequest(fmt.Errorf("You must provide a storage volume type of the storage volume."))
+	}
+	// Convert the volume type name to our internal integer representation.
+	volumeType, err := storagePoolVolumeTypeNameToType(volumeTypeName)
+	if err != nil {
+		return BadRequest(err)
+	}
+	// Check that the storage volume type is valid.
+	if !shared.IntInSlice(volumeType, supportedVolumeTypes) {
+		return BadRequest(fmt.Errorf("Invalid storage volume type %s.", volumeTypeName))
+	}
+
+	// Get the ID of the storage pool the storage volume is supposed to be
+	// attached to.
+	poolID, err := dbStoragePoolGetID(d.db, poolName)
+	if err != nil {
+		return InternalError(err)
+	}
+
+	// Get the storage volume.
+	_, volume, err := dbStoragePoolVolumeGetType(d.db, volumeName, volumeType, poolID)
+	if err != nil {
+		return SmartError(err)
+	}
+
+	etag := []interface{}{volume.VolumeName, volume.VolumeType, volume.VolumeUsedBy, volume.VolumeConfig}
+
+	return SyncResponseETag(true, volume, etag)
+}
+
+// /1.0/storage-pools/{pool_name}/volumes/{volume_type}/{volume_name}
+func storagePoolVolumeTypePost(d *Daemon, r *http.Request) Response {
+	return BadRequest(fmt.Errorf("Storage volumes can currently not be renamed."))
+}
+
+// /1.0/storage-pools/{pool_name}/volumes/{volume_type}/{volume_name}
+func storagePoolVolumeTypePut(d *Daemon, r *http.Request) Response {
+	// Get the name of the storage volume.
+	volumeName := mux.Vars(r)["volume_name"]
+	if volumeName == "" {
+		return BadRequest(fmt.Errorf("You must provide a name for the volume."))
+	}
+
+	// Get the name of the storage pool the volume is supposed to be
+	// attached to.
+	poolName := mux.Vars(r)["pool_name"]
+	if poolName == "" {
+		return BadRequest(fmt.Errorf("You must provide the name of the pool the volume belongs to."))
+	}
+
+	// Get the name of the volume type.
+	volumeTypeName := mux.Vars(r)["volume_type"]
+	if volumeTypeName == "" {
+		return BadRequest(fmt.Errorf("You must provide a storage volume type of the storage volume."))
+	}
+	// Convert the volume type name to our internal integer representation.
+	volumeType, err := storagePoolVolumeTypeNameToType(volumeTypeName)
+	if err != nil {
+		return BadRequest(err)
+	}
+	// Check that the storage volume type is valid.
+	if !shared.IntInSlice(volumeType, supportedVolumeTypes) {
+		return BadRequest(fmt.Errorf("Invalid storage volume type %s.", volumeTypeName))
+	}
+
+	poolID, pool, err := dbStoragePoolGet(d.db, poolName)
+	if err != nil {
+		if err == NoSuchObjectError {
+			return BadRequest(fmt.Errorf("The storage pool does not exist."))
+		}
+		return InternalError(err)
+	}
+
+	// Get the existing storage volume.
+	_, volume, err := dbStoragePoolVolumeGetType(d.db, volumeName, volumeType, poolID)
+	if err != nil {
+		if err == NoSuchObjectError {
+			return BadRequest(fmt.Errorf("The storage volume does not exist."))
+		}
+		return InternalError(err)
+	}
+
+	// Validate the ETag
+	etag := []interface{}{volume.VolumeName, volume.VolumeType, volume.VolumeUsedBy, volume.VolumeConfig}
+
+	err = etagCheck(r, etag)
+	if err != nil {
+		return PreconditionFailed(err)
+	}
+
+	req := api.StorageVolume{}
+	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+		return BadRequest(err)
+	}
+
+	// Validate the configuration
+	err = storageVolumeValidateConfig(req.VolumeName, req.VolumeConfig, pool)
+	if err != nil {
+		return BadRequest(err)
+	}
+
+	err = storagePoolVolumeUpdate(d, poolName, req.VolumeName, volumeType, req.VolumeConfig)
+	if err != nil {
+		return InternalError(err)
+	}
+
+	return EmptySyncResponse
+}
+
+// /1.0/storage-pools/{pool_name}/volumes/{volume_type}/{volume_name}
+func storagePoolVolumeTypePatch(d *Daemon, r *http.Request) Response {
+	// Get the name of the storage volume.
+	volumeName := mux.Vars(r)["volume_name"]
+	if volumeName == "" {
+		return BadRequest(fmt.Errorf("You must provide a name for the volume."))
+	}
+
+	// Get the name of the storage pool the volume is supposed to be
+	// attached to.
+	poolName := mux.Vars(r)["pool_name"]
+	if poolName == "" {
+		return BadRequest(fmt.Errorf("You must provide the name of the pool the volume belongs to."))
+	}
+
+	// Get the name of the volume type.
+	volumeTypeName := mux.Vars(r)["volume_type"]
+	if volumeTypeName == "" {
+		return BadRequest(fmt.Errorf("You must provide a storage volume type of the storage volume."))
+	}
+	// Convert the volume type name to our internal integer representation.
+	volumeType, err := storagePoolVolumeTypeNameToType(volumeTypeName)
+	if err != nil {
+		return BadRequest(err)
+	}
+	// Check that the storage volume type is valid.
+	if !shared.IntInSlice(volumeType, supportedVolumeTypes) {
+		return BadRequest(fmt.Errorf("Invalid storage volume type %s.", volumeTypeName))
+	}
+
+	// Get the ID of the storage pool the storage volume is supposed to be
+	// attached to.
+	poolID, pool, err := dbStoragePoolGet(d.db, poolName)
+	if err != nil {
+		if err == NoSuchObjectError {
+			return BadRequest(fmt.Errorf("The storage pool does not exist."))
+		}
+		return InternalError(err)
+	}
+
+	// Get the existing storage volume.
+	_, volume, err := dbStoragePoolVolumeGetType(d.db, volumeName, volumeType, poolID)
+	if err != nil {
+		if err == NoSuchObjectError {
+			return BadRequest(fmt.Errorf("The storage volume does not exist."))
+		}
+		return InternalError(err)
+	}
+
+	// Validate the ETag
+	etag := []interface{}{volume.VolumeName, volume.VolumeType, volume.VolumeUsedBy, volume.VolumeConfig}
+
+	err = etagCheck(r, etag)
+	if err != nil {
+		return PreconditionFailed(err)
+	}
+
+	req := api.StorageVolume{}
+	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+		return BadRequest(err)
+	}
+
+	if req.VolumeConfig == nil {
+		req.VolumeConfig = map[string]string{}
+	}
+
+	for k, v := range volume.VolumeConfig {
+		_, ok := req.VolumeConfig[k]
+		if !ok {
+			req.VolumeConfig[k] = v
+		}
+	}
+
+	// Validate the configuration
+	err = storageVolumeValidateConfig(volumeName, req.VolumeConfig, pool)
+	if err != nil {
+		return BadRequest(err)
+	}
+
+	err = storagePoolVolumeUpdate(d, poolName, req.VolumeName, volumeType, req.VolumeConfig)
+	if err != nil {
+		return InternalError(err)
+	}
+
+	return EmptySyncResponse
+}
+
+// /1.0/storage-pools/{pool_name}/volumes/{volume_type}/{volume_name}
+func storagePoolVolumeTypeDelete(d *Daemon, r *http.Request) Response {
+	// Get the name of the storage volume.
+	volumeName := mux.Vars(r)["volume_name"]
+	if volumeName == "" {
+		return BadRequest(fmt.Errorf("You must provide a name for the volume."))
+	}
+
+	// Get the name of the storage pool the volume is supposed to be
+	// attached to.
+	poolName := mux.Vars(r)["pool_name"]
+	if poolName == "" {
+		return BadRequest(fmt.Errorf("You must provide the name of the pool the volume belongs to."))
+	}
+
+	// Get the name of the volume type.
+	volumeTypeName := mux.Vars(r)["volume_type"]
+	if volumeTypeName == "" {
+		return BadRequest(fmt.Errorf("You must provide a type for the storage volume."))
+	}
+	// Convert the volume type name to our internal integer representation.
+	volumeType, err := storagePoolVolumeTypeNameToType(volumeTypeName)
+	if err != nil {
+		return BadRequest(err)
+	}
+	// Check that the storage volume type is valid.
+	if !shared.IntInSlice(volumeType, supportedVolumeTypes) {
+		return BadRequest(fmt.Errorf("Invalid storage volume type %s.", volumeTypeName))
+	}
+
+	s, err := storagePoolVolumeInit(d, poolName, volumeName, volumeType)
+	if err != nil {
+		return NotFound
+	}
+
+	err = s.StoragePoolVolumeDelete()
+	if err != nil {
+		return InternalError(err)
+	}
+
+	poolID, err := dbStoragePoolGetID(d.db, poolName)
+	if err != nil {
+		return InternalError(err)
+	}
+
+	err = dbStoragePoolVolumeDelete(d.db, volumeName, volumeType, poolID)
+	if err != nil {
+		return InternalError(err)
+	}
+
+	return EmptySyncResponse
+}
+
+var storagePoolVolumeTypeCmd = Command{name: "storage-pools/{pool_name}/volumes/{volume_type}/{volume_name}", get: storagePoolVolumeTypeGet, post: storagePoolVolumeTypePost, put: storagePoolVolumeTypePut, patch: storagePoolVolumeTypePatch, delete: storagePoolVolumeTypeDelete}
diff --git a/lxd/storage_volumes_config.go b/lxd/storage_volumes_config.go
new file mode 100644
index 0000000..83badb6
--- /dev/null
+++ b/lxd/storage_volumes_config.go
@@ -0,0 +1,125 @@
+package main
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/api"
+)
+
+var storageVolumeConfigKeys = map[string]func(value string) error{
+	"block.mount_options":  shared.IsAny,
+	"block.filesystem":     shared.IsAny,
+	"size":                 shared.IsAny,
+	"zfs.use_refquota":     shared.IsBool,
+	"zfs.remove_snapshots": shared.IsBool,
+}
+
+func storageVolumeValidateConfig(name string, config map[string]string, parentPool *api.StoragePool) error {
+	for key, val := range config {
+		// User keys are not validated.
+		if strings.HasPrefix(key, "user.") {
+			continue
+		}
+
+		// Validate storage volume config keys.
+		validator, ok := storageVolumeConfigKeys[key]
+		if !ok {
+			return fmt.Errorf("Invalid storage volume configuration key: %s", key)
+		}
+
+		err := validator(val)
+		if err != nil {
+			return err
+		}
+
+	}
+
+	// Driver-specific checks and defaults only need to run once.
+	if parentPool.PoolDriver != "zfs" {
+		if config["zfs.use_refquota"] != "" {
+			return fmt.Errorf("Key zfs.use_refquota cannot be used with non-zfs storage volumes.")
+		}
+
+		if config["zfs.remove_snapshots"] != "" {
+			return fmt.Errorf("Key zfs.remove_snapshots cannot be used with non-zfs storage volumes.")
+		}
+	}
+
+	if parentPool.PoolDriver == "dir" {
+		if config["block.mount_options"] != "" {
+			return fmt.Errorf("Key block.mount_options cannot be used with dir storage volumes.")
+		}
+
+		if config["block.filesystem"] != "" {
+			return fmt.Errorf("Key block.filesystem cannot be used with dir storage volumes.")
+		}
+
+		if config["size"] != "" {
+			return fmt.Errorf("Key size cannot be used with dir storage volumes.")
+		}
+	}
+
+	if parentPool.PoolDriver == "lvm" {
+		if config["block.filesystem"] == "" {
+			config["block.filesystem"] = parentPool.PoolConfig["volume.block.filesystem"]
+		}
+
+		if config["block.mount_options"] == "" {
+			config["block.mount_options"] = parentPool.PoolConfig["volume.block.mount_options"]
+		}
+	}
+
+	err := storageVolumeFillDefault(name, config, parentPool)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func storageVolumeFillDefault(name string, config map[string]string, parentPool *api.StoragePool) error {
+	if parentPool.PoolDriver == "dir" {
+		config["size"] = "0"
+	} else if parentPool.PoolDriver == "lvm" {
+		if config["size"] == "0" || config["size"] == "" {
+			config["size"] = parentPool.PoolConfig["volume.size"]
+		}
+
+		if config["size"] == "0" || config["size"] == "" {
+			sz, err := shared.ParseByteSizeString("10GB")
+			if err != nil {
+				return err
+			}
+			size := uint64(sz)
+			config["size"] = strconv.FormatUint(size, 10)
+		}
+	} else {
+		if config["size"] == "" {
+			config["size"] = parentPool.PoolConfig["volume.size"]
+		}
+
+		if config["size"] == "" {
+			config["size"] = "0"
+		}
+	}
+
+	if parentPool.PoolDriver == "lvm" {
+		if config["block.filesystem"] == "" {
+			config["block.filesystem"] = "ext4"
+		}
+
+		if config["block.mount_options"] == "" && config["block.filesystem"] == "ext4" {
+			config["block.mount_options"] = "discard"
+		}
+
+		if config["lvm.thinpool_name"] == "" {
+			config["lvm.thinpool_name"] = parentPool.PoolConfig["volume.lvm.thinpool_name"]
+			if config["lvm.thinpool_name"] == "" {
+				config["lvm.thinpool_name"] = "LXDThinPool"
+			}
+		}
+	}
+
+	return nil
+}
diff --git a/lxd/storage_volumes_utils.go b/lxd/storage_volumes_utils.go
new file mode 100644
index 0000000..7bc2383
--- /dev/null
+++ b/lxd/storage_volumes_utils.go
@@ -0,0 +1,188 @@
+package main
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/lxc/lxd/shared"
+)
+
+const (
+	storagePoolVolumeTypeContainer = iota
+	storagePoolVolumeTypeImage
+	storagePoolVolumeTypeCustom
+)
+
+// COMMENT(brauner): Leave the string type in here! This guarantees that go
+// treats these as typed string constants. Removing it causes go to treat
+// them as untyped string constants, which is not what we want.
+const (
+	storagePoolVolumeTypeNameContainer string = "container"
+	storagePoolVolumeTypeNameImage     string = "image"
+	storagePoolVolumeTypeNameCustom    string = "custom"
+)
+
+// COMMENT(brauner): Leave the string type in here! This guarantees that go
+// treats these as typed string constants. Removing it causes go to treat
+// them as untyped string constants, which is not what we want.
+const (
+	storagePoolVolumeApiEndpointContainers string = "containers"
+	storagePoolVolumeApiEndpointImages     string = "images"
+	storagePoolVolumeApiEndpointCustom     string = "custom"
+)
+
+var supportedVolumeTypes = []int{storagePoolVolumeTypeContainer, storagePoolVolumeTypeImage, storagePoolVolumeTypeCustom}
+
+func storagePoolVolumeTypeNameToType(volumeTypeName string) (int, error) {
+	switch volumeTypeName {
+	case storagePoolVolumeTypeNameContainer:
+		return storagePoolVolumeTypeContainer, nil
+	case storagePoolVolumeTypeNameImage:
+		return storagePoolVolumeTypeImage, nil
+	case storagePoolVolumeTypeNameCustom:
+		return storagePoolVolumeTypeCustom, nil
+	}
+
+	return -1, fmt.Errorf("Invalid storage volume type name.")
+}
+
+func storagePoolVolumeTypeNameToApiEndpoint(volumeTypeName string) (string, error) {
+	switch volumeTypeName {
+	case storagePoolVolumeTypeNameContainer:
+		return storagePoolVolumeApiEndpointContainers, nil
+	case storagePoolVolumeTypeNameImage:
+		return storagePoolVolumeApiEndpointImages, nil
+	case storagePoolVolumeTypeNameCustom:
+		return storagePoolVolumeApiEndpointCustom, nil
+	}
+
+	return "", fmt.Errorf("Invalid storage volume type name.")
+}
+
+func storagePoolVolumeTypeToName(volumeType int) (string, error) {
+	switch volumeType {
+	case storagePoolVolumeTypeContainer:
+		return storagePoolVolumeTypeNameContainer, nil
+	case storagePoolVolumeTypeImage:
+		return storagePoolVolumeTypeNameImage, nil
+	case storagePoolVolumeTypeCustom:
+		return storagePoolVolumeTypeNameCustom, nil
+	}
+
+	return "", fmt.Errorf("Invalid storage volume type.")
+}
+
+func storagePoolVolumeTypeToApiEndpoint(volumeType int) (string, error) {
+	switch volumeType {
+	case storagePoolVolumeTypeContainer:
+		return storagePoolVolumeApiEndpointContainers, nil
+	case storagePoolVolumeTypeImage:
+		return storagePoolVolumeApiEndpointImages, nil
+	case storagePoolVolumeTypeCustom:
+		return storagePoolVolumeApiEndpointCustom, nil
+	}
+
+	return "", fmt.Errorf("Invalid storage volume type.")
+}
+
+func storagePoolVolumeApiEndpointToType(apiEndpoint string) (int, error) {
+	switch apiEndpoint {
+	case storagePoolVolumeApiEndpointContainers:
+		return storagePoolVolumeTypeContainer, nil
+	case storagePoolVolumeApiEndpointImages:
+		return storagePoolVolumeTypeImage, nil
+	case storagePoolVolumeApiEndpointCustom:
+		return storagePoolVolumeTypeCustom, nil
+	}
+
+	return -1, fmt.Errorf("Invalid storage volume api endpoint.")
+}
+
+func storagePoolVolumeUpdate(d *Daemon, poolName string, volumeName string, volumeType int, newConfig map[string]string) error {
+	s, err := storagePoolVolumeInit(d, poolName, volumeName, volumeType)
+	if err != nil {
+		return err
+	}
+
+	oldWritable := s.GetStoragePoolVolumeWritable()
+	newWritable := oldWritable
+
+	// Backup the current state
+	oldConfig := map[string]string{}
+	err = shared.DeepCopy(&oldWritable.VolumeConfig, &oldConfig)
+	if err != nil {
+		return err
+	}
+
+	// Define a function which reverts everything.  Defer this function
+	// so that it doesn't need to be explicitly called in every failing
+	// return path. Track whether or not we want to undo the changes
+	// using a closure.
+	undoChanges := true
+	defer func() {
+		if undoChanges {
+			s.SetStoragePoolVolumeWritable(&oldWritable)
+		}
+	}()
+
+	// Diff the configurations
+	changedConfig := []string{}
+	userOnly := true
+	for key := range oldConfig {
+		if oldConfig[key] != newConfig[key] {
+			if !strings.HasPrefix(key, "user.") {
+				userOnly = false
+			}
+
+			if !shared.StringInSlice(key, changedConfig) {
+				changedConfig = append(changedConfig, key)
+			}
+		}
+	}
+
+	for key := range newConfig {
+		if oldConfig[key] != newConfig[key] {
+			if !strings.HasPrefix(key, "user.") {
+				userOnly = false
+			}
+
+			if !shared.StringInSlice(key, changedConfig) {
+				changedConfig = append(changedConfig, key)
+			}
+		}
+	}
+
+	// Skip on no change
+	if len(changedConfig) == 0 {
+		return nil
+	}
+
+	// Update the storage pool
+	if !userOnly {
+		err = s.StoragePoolVolumeUpdate(changedConfig)
+		if err != nil {
+			return err
+		}
+	}
+
+	newWritable.VolumeConfig = newConfig
+
+	// Apply the new configuration
+	s.SetStoragePoolVolumeWritable(&newWritable)
+
+	poolID, err := dbStoragePoolGetID(d.db, poolName)
+	if err != nil {
+		return err
+	}
+
+	// Update the database
+	err = dbStoragePoolVolumeUpdate(d.db, volumeName, volumeType, poolID, newConfig)
+	if err != nil {
+		return err
+	}
+
+	// Success, update the closure to mark that the changes should be kept.
+	undoChanges = false
+
+	return nil
+}
diff --git a/shared/api/storage.go b/shared/api/storage.go
new file mode 100644
index 0000000..767b5ab
--- /dev/null
+++ b/shared/api/storage.go
@@ -0,0 +1,37 @@
+package api
+
+// StoragePool represents the fields of a LXD storage pool.
+type StoragePool struct {
+	PoolName       string   `json:"pool_name" yaml:"pool_name"`
+	PoolDriver     string   `json:"driver" yaml:"driver"`
+	PoolUsedBy     []string `json:"used_by" yaml:"used_by"`
+	StoragePoolPut `yaml:",inline"`
+}
+
+// StoragePoolPut represents the modifiable fields of a LXD storage pool.
+type StoragePoolPut struct {
+	PoolConfig map[string]string `json:"config" yaml:"config"`
+}
+
+// StorageVolume represents the fields of a LXD storage volume.
+type StorageVolume struct {
+	VolumeType       string   `json:"volume_type" yaml:"volume_type"`
+	VolumeUsedBy     []string `json:"used_by" yaml:"used_by"`
+	StorageVolumePut `yaml:",inline"`
+}
+
+// StorageVolumePut represents the modifiable fields of a LXD storage volume.
+type StorageVolumePut struct {
+	VolumeName   string            `json:"volume_name" yaml:"volume_name"`
+	VolumeConfig map[string]string `json:"config" yaml:"config"`
+}
+
+// Writable converts a full StoragePool struct into a StoragePoolPut struct
+// (filters read-only fields).
+func (storagePool *StoragePool) Writable() StoragePoolPut {
+	return storagePool.StoragePoolPut
+}
+
+// Writable converts a full StorageVolume struct into a StorageVolumePut struct
+// (filters read-only fields).
+func (storageVolume *StorageVolume) Writable() StorageVolumePut {
+	return storageVolume.StorageVolumePut
+}

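As a rough illustration of how these structs serialize (not part of the
patch; the "source" config key and values here are made up):

	pool := api.StoragePool{
		PoolName:   "default",
		PoolDriver: "dir",
		StoragePoolPut: api.StoragePoolPut{
			PoolConfig: map[string]string{"source": "/var/lib/lxd/storage-pools/default"},
		},
	}
	data, _ := json.Marshal(pool)
	// The embedded StoragePoolPut has no json tag, so its fields are
	// promoted and the config ends up at the top level:
	// {"pool_name":"default","driver":"dir","used_by":null,"config":{...}}
	fmt.Println(string(data))
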
From b3089d58b12094fda0ab1b452c4752bbded7ff21 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Tue, 17 Jan 2017 15:03:28 +0100
Subject: [PATCH 03/63] lxd/daemon: create storage api directories

The new storage api adds the following directories (a setup sketch follows
the list):

- ${LXD_DIR}/custom
  - Mountpoints for storage volumes of type custom.
- ${LXD_DIR}/disks
  - Repository for storage pool loop files when the user has not specified a
    source.
- ${LXD_DIR}/storage-pools
  - Mountpoints for storage pools.
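
A minimal sketch of the corresponding setup calls (mirroring the diff
below; the helper name and the 0711 mode for custom/ are assumptions):

	func ensureStorageAPIDirs() error {
		for dir, mode := range map[string]os.FileMode{
			"disks":         0700, // from the patch
			"storage-pools": 0711, // from the patch
			"custom":        0711, // assumed mode
		} {
			// Create each directory under ${LXD_DIR} if it is missing.
			if err := os.MkdirAll(shared.VarPath(dir), mode); err != nil {
				return err
			}
		}
		return nil
	}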

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/daemon.go | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/lxd/daemon.go b/lxd/daemon.go
index e0fc291..6bc6c90 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -773,6 +773,21 @@ func (d *Daemon) Init() error {
 		return err
 	}
 
+	// COMMENT(brauner):
+	// API addition: storage
+	// Repository for storage pool loop files when the user has not
+	// specified a source.
+	if err := os.MkdirAll(shared.VarPath("disks"), 0700); err != nil {
+		return err
+	}
+
+	// COMMENT(brauner):
+	// API addition: storage
+	// Mountpoints for storage pools.
+	if err := os.MkdirAll(shared.VarPath("storage-pools"), 0711); err != nil {
+		return err
+	}
+
 	/* Detect the filesystem */
 	d.BackingFs, err = filesystemDetect(d.lxcpath)
 	if err != nil {

From 0a5e6e39413fd6b6d14b446679cd674fd0c72431 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Thu, 5 Jan 2017 13:05:53 +0100
Subject: [PATCH 04/63] lxd/api_1.0: add new storage api commands

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/api_1.0.go | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/lxd/api_1.0.go b/lxd/api_1.0.go
index d4de314..9b42a53 100644
--- a/lxd/api_1.0.go
+++ b/lxd/api_1.0.go
@@ -44,6 +44,11 @@ var api10 = []Command{
 	certificateFingerprintCmd,
 	profilesCmd,
 	profileCmd,
+	storagePoolsCmd,
+	storagePoolCmd,
+	storagePoolVolumesCmd,
+	storagePoolVolumesTypeCmd,
+	storagePoolVolumeTypeCmd,
 }
 
 func api10Get(d *Daemon, r *http.Request) Response {
@@ -84,6 +89,7 @@ func api10Get(d *Daemon, r *http.Request) Response {
 			"id_map",
 			"network_firewall_filtering",
 			"network_routes",
+			"storage",
 		},
 		APIStatus:  "stable",
 		APIVersion: version.APIVersion,

From 3cc02ea651ae81001fa0e744b42eee16260b4d7e Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Thu, 5 Jan 2017 13:07:41 +0100
Subject: [PATCH 05/63] client: add new client storage api

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 client.go | 163 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 163 insertions(+)

diff --git a/client.go b/client.go
index b4629b8..17dc448 100644
--- a/client.go
+++ b/client.go
@@ -2801,3 +2801,166 @@ func (c *Client) ListNetworks() ([]api.Network, error) {
 
 	return networks, nil
 }
+
+// Storage functions
+func (c *Client) ListStoragePools() ([]api.StoragePool, error) {
+	if c.Remote.Public {
+		return nil, fmt.Errorf("This function isn't supported by public remotes.")
+	}
+
+	resp, err := c.get("storage-pools?recursion=1")
+	if err != nil {
+		return nil, err
+	}
+
+	pools := []api.StoragePool{}
+	if err := json.Unmarshal(resp.Metadata, &pools); err != nil {
+		return nil, err
+	}
+
+	return pools, nil
+}
+
+func (c *Client) StoragePoolCreate(name string, driver string, config map[string]string) error {
+	if c.Remote.Public {
+		return fmt.Errorf("This function isn't supported by public remotes.")
+	}
+
+	body := shared.Jmap{"pool_name": name, "driver": driver, "config": config}
+
+	_, err := c.post("storage-pools", body, api.SyncResponse)
+	return err
+}
+
+func (c *Client) StoragePoolDelete(name string) error {
+	if c.Remote.Public {
+		return fmt.Errorf("This function isn't supported by public remotes.")
+	}
+
+	_, err := c.delete(fmt.Sprintf("storage-pools/%s", name), nil, api.SyncResponse)
+	return err
+}
+
+func (c *Client) StoragePoolGet(name string) (api.StoragePool, error) {
+	if c.Remote.Public {
+		return api.StoragePool{}, fmt.Errorf("This function isn't supported by public remotes.")
+	}
+
+	resp, err := c.get(fmt.Sprintf("storage-pools/%s", name))
+	if err != nil {
+		return api.StoragePool{}, err
+	}
+
+	pool := api.StoragePool{}
+	if err := json.Unmarshal(resp.Metadata, &pool); err != nil {
+		return api.StoragePool{}, err
+	}
+
+	return pool, nil
+}
+
+func (c *Client) StoragePoolPut(name string, pool api.StoragePool) error {
+	if c.Remote.Public {
+		return fmt.Errorf("This function isn't supported by public remotes.")
+	}
+
+	if pool.PoolName != name {
+		return fmt.Errorf("Cannot change storage pool name")
+	}
+
+	_, err := c.put(fmt.Sprintf("storage-pools/%s", name), pool, api.SyncResponse)
+	return err
+}
+
+// /1.0/storage-pools/{pool_name}/volumes
+func (c *Client) StoragePoolVolumesList(pool string) ([]api.StorageVolume, error) {
+	if c.Remote.Public {
+		return nil, fmt.Errorf("This function isn't supported by public remotes.")
+	}
+
+	resp, err := c.get(fmt.Sprintf("storage-pools/%s/volumes?recursion=1", pool))
+	if err != nil {
+		return nil, err
+	}
+
+	volumes := []api.StorageVolume{}
+	if err := json.Unmarshal(resp.Metadata, &volumes); err != nil {
+		return nil, err
+	}
+
+	return volumes, nil
+}
+
+// /1.0/storage-pools/{pool_name}/volumes/{volume_type}
+func (c *Client) StoragePoolVolumesTypeList(pool string, volumeType string) ([]api.StorageVolume, error) {
+	if c.Remote.Public {
+		return nil, fmt.Errorf("This function isn't supported by public remotes.")
+	}
+
+	resp, err := c.get(fmt.Sprintf("storage-pools/%s/volumes/%s?recursion=1", pool, volumeType))
+	if err != nil {
+		return nil, err
+	}
+
+	volumes := []api.StorageVolume{}
+	if err := json.Unmarshal(resp.Metadata, &volumes); err != nil {
+		return nil, err
+	}
+
+	return volumes, nil
+}
+
+// /1.0/storage-pools/{pool_name}/volumes/{volume_type}
+func (c *Client) StoragePoolVolumeTypeCreate(pool string, volume string, volumeType string, config map[string]string) error {
+	if c.Remote.Public {
+		return fmt.Errorf("This function isn't supported by public remotes.")
+	}
+
+	body := shared.Jmap{"pool_name": pool, "volume_name": volume, "volume_type": volumeType, "config": config}
+
+	_, err := c.post(fmt.Sprintf("storage-pools/%s/volumes/%s", pool, volumeType), body, api.SyncResponse)
+	return err
+}
+
+// /1.0/storage-pools/{pool_name}/volumes/{volume_type}/{volume_name}
+func (c *Client) StoragePoolVolumeTypeGet(pool string, volume string, volumeType string) (api.StorageVolume, error) {
+	if c.Remote.Public {
+		return api.StorageVolume{}, fmt.Errorf("This function isn't supported by public remotes.")
+	}
+
+	resp, err := c.get(fmt.Sprintf("storage-pools/%s/volumes/%s/%s", pool, volumeType, volume))
+	if err != nil {
+		return api.StorageVolume{}, err
+	}
+
+	vol := api.StorageVolume{}
+	if err := json.Unmarshal(resp.Metadata, &vol); err != nil {
+		return api.StorageVolume{}, err
+	}
+
+	return vol, nil
+}
+
+// /1.0/storage-pools/{pool_name}/volumes/{volume_type}/{volume_name}
+func (c *Client) StoragePoolVolumeTypePut(pool string, volume string, volumeType string, volumeConfig api.StorageVolume) error {
+	if c.Remote.Public {
+		return fmt.Errorf("This function isn't supported by public remotes.")
+	}
+
+	if volumeConfig.VolumeName != volume {
+		return fmt.Errorf("Cannot change storage volume name")
+	}
+
+	_, err := c.put(fmt.Sprintf("storage-pools/%s/volumes/%s/%s", pool, volumeType, volume), volumeConfig, api.SyncResponse)
+	return err
+}
+
+// /1.0/storage-pools/{pool_name}/volumes/{volume_type}/{volume_name}
+func (c *Client) StoragePoolVolumeTypeDelete(pool string, volume string, volumeType string) error {
+	if c.Remote.Public {
+		return fmt.Errorf("This function isn't supported by public remotes.")
+	}
+
+	_, err := c.delete(fmt.Sprintf("storage-pools/%s/volumes/%s/%s", pool, volumeType, volume), nil, api.SyncResponse)
+	return err
+}

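A rough usage sketch of the new client calls (illustrative; c is a
configured *lxd.Client and error handling is elided):

	// Create a pool, then a custom volume on it, read both back, and
	// clean up the volume again.
	err := c.StoragePoolCreate("default", "dir", map[string]string{})
	pool, err := c.StoragePoolGet("default")
	err = c.StoragePoolVolumeTypeCreate("default", "vol1", "custom", map[string]string{})
	vol, err := c.StoragePoolVolumeTypeGet("default", "vol1", "custom")
	err = c.StoragePoolVolumeTypeDelete("default", "vol1", "custom")
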
From b3db2afe6667d8fb2b20e4d066b70abaa9ccafcd Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Wed, 11 Jan 2017 11:44:53 +0100
Subject: [PATCH 06/63] lxd/container*: add storage pool argument

This argument is used to create a container in a specific storage pool.
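
For example, a create request carrying the new field might be built like
this (illustrative values):

	req := api.ContainersPost{
		Name:        "c1",
		Source:      api.ContainerSource{Type: "image", Alias: "ubuntu"},
		StoragePool: "default",
	}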

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/container.go                 |  1 +
 lxd/container_lxc.go             | 12 ++++++++++--
 lxd/containers_post.go           |  4 ++++
 shared/api/container.go          | 12 ++++++++++++
 shared/api/container_snapshot.go |  9 +++++++++
 5 files changed, 36 insertions(+), 2 deletions(-)

diff --git a/lxd/container.go b/lxd/container.go
index c151d5f..42febae 100644
--- a/lxd/container.go
+++ b/lxd/container.go
@@ -323,6 +323,7 @@ type containerArgs struct {
 	Name         string
 	Profiles     []string
 	Stateful     bool
+	StoragePool  string
 }
 
 // The container interface
diff --git a/lxd/container_lxc.go b/lxd/container_lxc.go
index dbfbb52..f524920 100644
--- a/lxd/container_lxc.go
+++ b/lxd/container_lxc.go
@@ -195,6 +195,7 @@ func containerLXCCreate(d *Daemon, args containerArgs) (container, error) {
 		profiles:     args.Profiles,
 		localConfig:  args.Config,
 		localDevices: args.Devices,
+		storagePool:  args.StoragePool,
 	}
 
 	ctxMap := log.Ctx{"name": c.name,
@@ -352,7 +353,9 @@ func containerLXCLoad(d *Daemon, args containerArgs) (container, error) {
 		profiles:     args.Profiles,
 		localConfig:  args.Config,
 		localDevices: args.Devices,
-		stateful:     args.Stateful}
+		stateful:     args.Stateful,
+		storagePool:  args.StoragePool,
+	}
 
 	// Detect the storage backend
 	s, err := storageForFilename(d, shared.VarPath("containers", strings.Split(c.name, "/")[0]))
@@ -394,7 +397,10 @@ type containerLXC struct {
 	c        *lxc.Container
 	daemon   *Daemon
 	idmapset *shared.IdmapSet
-	storage  storage
+
+	// Storage
+	storagePool string
+	storage     storage
 }
 
 func (c *containerLXC) createOperation(action string, reusable bool, reuse bool) (*lxcContainerOperation, error) {
@@ -2415,6 +2421,7 @@ func (c *containerLXC) Render() (interface{}, interface{}, error) {
 			Name:            c.name,
 			Profiles:        c.profiles,
 			Stateful:        c.stateful,
+			StoragePool:     c.storagePool,
 		}, etag, nil
 	} else {
 		// FIXME: Render shouldn't directly access the go-lxc struct
@@ -2431,6 +2438,7 @@ func (c *containerLXC) Render() (interface{}, interface{}, error) {
 			Status:          statusCode.String(),
 			StatusCode:      statusCode,
 			Stateful:        c.stateful,
+			StoragePool:     c.storagePool,
 		}
 
 		ct.Architecture = architectureName
diff --git a/lxd/containers_post.go b/lxd/containers_post.go
index 598cabf..428dfa6 100644
--- a/lxd/containers_post.go
+++ b/lxd/containers_post.go
@@ -115,6 +115,7 @@ func createFromImage(d *Daemon, req *api.ContainersPost) Response {
 			Ephemeral:    req.Ephemeral,
 			Name:         req.Name,
 			Profiles:     req.Profiles,
+			StoragePool:  req.StoragePool,
 		}
 
 		_, err = containerCreateFromImage(d, args, hash)
@@ -146,6 +147,7 @@ func createFromNone(d *Daemon, req *api.ContainersPost) Response {
 		Ephemeral:    req.Ephemeral,
 		Name:         req.Name,
 		Profiles:     req.Profiles,
+		StoragePool:  req.StoragePool,
 	}
 
 	run := func(op *operation) error {
@@ -183,6 +185,7 @@ func createFromMigration(d *Daemon, req *api.ContainersPost) Response {
 		Ephemeral:    req.Ephemeral,
 		Name:         req.Name,
 		Profiles:     req.Profiles,
+		StoragePool:  req.StoragePool,
 	}
 
 	var c container
@@ -339,6 +342,7 @@ func createFromCopy(d *Daemon, req *api.ContainersPost) Response {
 		Ephemeral:    req.Ephemeral,
 		Name:         req.Name,
 		Profiles:     req.Profiles,
+		StoragePool:  req.StoragePool,
 	}
 
 	run := func(op *operation) error {
diff --git a/shared/api/container.go b/shared/api/container.go
index 1c57590..8e7f728 100644
--- a/shared/api/container.go
+++ b/shared/api/container.go
@@ -10,12 +10,18 @@ type ContainersPost struct {
 
 	Name   string          `json:"name"`
 	Source ContainerSource `json:"source"`
+
+	// API extension: storage
+	StoragePool string `json:"storage_pool"`
 }
 
 // ContainerPost represents the fields required to rename/move a LXD container
 type ContainerPost struct {
 	Migration bool   `json:"migration"`
 	Name      string `json:"name"`
+
+	// API extension: storage
+	StoragePool string `json:"storage_pool"`
 }
 
 // ContainerPut represents the modifiable fields of a LXD container
@@ -42,6 +48,9 @@ type Container struct {
 
 	// API extension: container_last_used_at
 	LastUsedAt time.Time `json:"last_used_at"`
+
+	// API extension: storage
+	StoragePool string `json:"storage_pool"`
 }
 
 // Writable converts a full Container struct into a ContainerPut struct (filters read-only fields)
@@ -87,4 +96,7 @@ type ContainerSource struct {
 
 	// For "copy" type
 	Source string `json:"source,omitempty"`
+
+	// API extension: storage
+	StoragePool string `json:"storage_pool"`
 }
diff --git a/shared/api/container_snapshot.go b/shared/api/container_snapshot.go
index ab65ab7..86f142c 100644
--- a/shared/api/container_snapshot.go
+++ b/shared/api/container_snapshot.go
@@ -8,12 +8,18 @@ import (
 type ContainerSnapshotsPost struct {
 	Name     string `json:"name"`
 	Stateful bool   `json:"stateful"`
+
+	// API extension: storage
+	StoragePool string `json:"storage_pool"`
 }
 
 // ContainerSnapshotPost represents the fields required to rename/move a LXD container snapshot
 type ContainerSnapshotPost struct {
 	Name      string `json:"name"`
 	Migration bool   `json:"migration"`
+
+	// API extension: storage
+	StoragePool string `json:"storage_pool"`
 }
 
 // ContainerSnapshot represents a LXD container snapshot
@@ -29,4 +35,7 @@ type ContainerSnapshot struct {
 	Name            string                       `json:"name"`
 	Profiles        []string                     `json:"profiles"`
 	Stateful        bool                         `json:"stateful"`
+
+	// API extension: storage
+	StoragePool string `json:"storage_pool"`
 }

From 153006e6e1844f73f399c3f809a5adb512d526da Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Wed, 11 Jan 2017 11:47:54 +0100
Subject: [PATCH 07/63] client: add storage pool argument

The storage pool argument is used to create containers in a specific storage
pool.
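
For example (assuming a pool named "default" already exists):

	lxc init ubuntu:16.04 c1 --storage default
	lxc launch ubuntu:16.04 c2 -s default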

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 client.go     |  4 +++-
 lxc/init.go   | 27 ++++++++++++++++++++-------
 lxc/launch.go | 17 +++++++++++++----
 3 files changed, 36 insertions(+), 12 deletions(-)

diff --git a/client.go b/client.go
index 17dc448..399e9d1 100644
--- a/client.go
+++ b/client.go
@@ -1274,7 +1274,7 @@ func (c *Client) GetAlias(alias string) string {
 
 // Init creates a container from either a fingerprint or an alias; you must
 // provide at least one.
-func (c *Client) Init(name string, imgremote string, image string, profiles *[]string, config map[string]string, devices map[string]map[string]string, ephem bool) (*api.Response, error) {
+func (c *Client) Init(name string, imgremote string, image string, profiles *[]string, config map[string]string, devices map[string]map[string]string, storagePool string, ephem bool) (*api.Response, error) {
 	if c.Remote.Public {
 		return nil, fmt.Errorf("This function isn't supported by public remotes.")
 	}
@@ -1364,6 +1364,8 @@ func (c *Client) Init(name string, imgremote string, image string, profiles *[]s
 
 	body := shared.Jmap{"source": source}
 
+	if storagePool != "" {
+		body["storage_pool"] = storagePool
+	}
+
 	if name != "" {
 		body["name"] = name
 	}
diff --git a/lxc/init.go b/lxc/init.go
index 4305902..bde6aaf 100644
--- a/lxc/init.go
+++ b/lxc/init.go
@@ -60,10 +60,11 @@ func (f *profileList) Set(value string) error {
 var initRequestedEmptyProfiles bool
 
 type initCmd struct {
-	profArgs profileList
-	confArgs configList
-	ephem    bool
-	network  string
+	profArgs    profileList
+	confArgs    configList
+	ephem       bool
+	network     string
+	storagePool string
 }
 
 func (c *initCmd) showByDefault() bool {
@@ -74,7 +75,7 @@ func (c *initCmd) usage() string {
 	return i18n.G(
 		`Initialize a container from a particular image.
 
-lxc init [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n <network>]
+lxc init [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n <network>] [--storage|-s <pool>]
 
 Initializes a container using the specified image and name.
 
@@ -141,6 +142,8 @@ func (c *initCmd) flags() {
 	gnuflag.BoolVar(&c.ephem, "e", false, i18n.G("Ephemeral container"))
 	gnuflag.StringVar(&c.network, "network", "", i18n.G("Network name"))
 	gnuflag.StringVar(&c.network, "n", "", i18n.G("Network name"))
+	gnuflag.StringVar(&c.storagePool, "storage", "", i18n.G("Storage pool name"))
+	gnuflag.StringVar(&c.storagePool, "s", "", i18n.G("Storage pool name"))
 }
 
 func (c *initCmd) run(config *lxd.Config, args []string) error {
@@ -197,10 +200,20 @@ func (c *initCmd) run(config *lxd.Config, args []string) error {
 		}
 	}
 
+	// Check if the specified storage pool exists.
+	storagePool := ""
+	if c.storagePool != "" {
+		pool, err := d.StoragePoolGet(c.storagePool)
+		if err != nil {
+			return err
+		}
+		storagePool = pool.PoolName
+	}
+
 	if !initRequestedEmptyProfiles && len(profiles) == 0 {
-		resp, err = d.Init(name, iremote, image, nil, configMap, devicesMap, c.ephem)
+		resp, err = d.Init(name, iremote, image, nil, configMap, devicesMap, storagePool, c.ephem)
 	} else {
-		resp, err = d.Init(name, iremote, image, &profiles, configMap, devicesMap, c.ephem)
+		resp, err = d.Init(name, iremote, image, &profiles, configMap, devicesMap, storagePool, c.ephem)
 	}
 	if err != nil {
 		return err
diff --git a/lxc/launch.go b/lxc/launch.go
index fd3d531..29c92cf 100644
--- a/lxc/launch.go
+++ b/lxc/launch.go
@@ -23,7 +23,7 @@ func (c *launchCmd) usage() string {
 	return i18n.G(
 		`Launch a container from a particular image.
 
-lxc launch [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n <network>]
+lxc launch [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n <network>] [--storage|-s <pool>]
 
 Launches a container using the specified image and name.
 
@@ -85,12 +85,21 @@ func (c *launchCmd) run(config *lxd.Config, args []string) error {
 		}
 	}
 
+	// Check if the specified storage pool exists.
+	storagePool := ""
+	if c.init.storagePool != "" {
+		pool, err := d.StoragePoolGet(c.init.storagePool)
+		if err != nil {
+			return err
+		}
+		storagePool = pool.PoolName
+	}
+
 	if !initRequestedEmptyProfiles && len(profiles) == 0 {
-		resp, err = d.Init(name, iremote, image, nil, configMap, devicesMap, c.init.ephem)
+		resp, err = d.Init(name, iremote, image, nil, configMap, devicesMap, storagePool, c.init.ephem)
 	} else {
-		resp, err = d.Init(name, iremote, image, &profiles, configMap, devicesMap, c.init.ephem)
+		resp, err = d.Init(name, iremote, image, &profiles, configMap, devicesMap, storagePool, c.init.ephem)
 	}
-
 	if err != nil {
 		return err
 	}

From a9a64b4198a3c339df3fd00aa2b26ca15f0e4d29 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Wed, 11 Jan 2017 11:52:58 +0100
Subject: [PATCH 08/63] lxd/storage: add storageStringToType()

This function is the inverse of storageTypeToString().
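
A quick round-trip sketch (illustrative):

	sType, err := storageStringToType("zfs")
	if err != nil {
		return err
	}
	name, err := storageTypeToString(sType)
	// name == "zfs", err == nil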

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/storage.go | 31 +++++++++++++++++++++++++------
 1 file changed, 25 insertions(+), 6 deletions(-)

diff --git a/lxd/storage.go b/lxd/storage.go
index d9e0e7c..91fc27b 100644
--- a/lxd/storage.go
+++ b/lxd/storage.go
@@ -99,19 +99,38 @@ const (
 	storageTypeMock
 )
 
-func storageTypeToString(sType storageType) string {
+func storageTypeToString(sType storageType) (string, error) {
 	switch sType {
 	case storageTypeBtrfs:
-		return "btrfs"
+		return "btrfs", nil
 	case storageTypeZfs:
-		return "zfs"
+		return "zfs", nil
 	case storageTypeLvm:
-		return "lvm"
+		return "lvm", nil
 	case storageTypeMock:
-		return "mock"
+		return "mock", nil
+	case storageTypeDir:
+		return "dir", nil
+	}
+
+	return "", fmt.Errorf("Invalid storage type.")
+}
+
+func storageStringToType(sName string) (storageType, error) {
+	switch sName {
+	case "btrfs":
+		return storageTypeBtrfs, nil
+	case "zfs":
+		return storageTypeZfs, nil
+	case "lvm":
+		return storageTypeLvm, nil
+	case "mock":
+		return storageTypeMock, nil
+	case "dir":
+		return storageTypeDir, nil
 	}
 
-	return "dir"
+	return -1, fmt.Errorf("Invalid storage type name.")
 }
 
 type MigrationStorageSourceDriver interface {

From fb107f6ac465169d50c5d87e892e8da90000d868 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Wed, 11 Jan 2017 12:03:01 +0100
Subject: [PATCH 09/63] lxd/storage: abstract storage api

This implements the initial abstraction in our internal storage api needed to
create containers, images, and custom storage pools and volumes.

type storageCoreInfo interface:
storageCoreInfo requests a set of methods providing minimal information about
a given storage type (It is complemented by a type storageCore struct.):
- StorageCoreInit() (*storageCore, error)
  - Initialize a storageCoreInfo interface and storageCore struct.
- GetStorageType() storageType
  - internal storage type representation via our custom type
- GetStorageTypeName() string
  - storage name as string
- GetStorageTypeVersion() string
  - storage version: this specifically concerns the versions of tools etc.
    used to create a given storage type.

type storageCore struct:
- sType        storageType
  - internal representation of the storage type
- sTypeName    string
  - name of the storage type
- sTypeVersion string
  - version of the tool used to create storage pools and volumes
- log          shared.Logger
  - self-explanatory

Functions dealing with storage pools:
- StoragePoolInit(config map[string]interface{}) (storage, error)
- StoragePoolCheck() error
- StoragePoolCreate() error
- StoragePoolDelete() error
- StoragePoolUpdate(changedConfig []string) error
- GetStoragePoolWritable() api.StoragePoolPut
- SetStoragePoolWritable(writable *api.StoragePoolPut)
- GetContainerPool() string

Functions dealing with storage volumes:
- StoragePoolVolumeCreate() error
- StoragePoolVolumeDelete() error
- StoragePoolVolumeUpdate(changedConfig []string) error
- GetStoragePoolVolumeWritable() api.StorageVolumePut
- SetStoragePoolVolumeWritable(writable *api.StorageVolumePut)

type storageShared struct:
- gains fields for storage pools and storage volumes

Initializer functions (a usage sketch follows this list):
- storageWrapperInit(d *Daemon, poolName string, volumeName string, volumeType int) (*storageLogWrapper, error)
  - Simple helper function called by the various official internal
    initializers.
- storagePoolCoreInit(poolDriver string) (*storageCore, error)
  - Initialize a storageCoreInfo interface and storageCore struct of a given
    type.
- storagePoolInit(d *Daemon, poolName string) (storage, error)
  - Initialize a new storage pool.
- storagePoolVolumeInit(d *Daemon, poolName string, volumeName string, volumeType int) (storage, error)
  - Initialize a new storage volume and the pool it is supposed to be on.
- storagePoolVolumeContainerCreateInit(d *Daemon, poolName string, containerName string) (storage, error)
  - Initialize a storage interface suitable for creating a new storage volume
    for a container on a given storage pool.
- storagePoolVolumeContainerLoadInit(d *Daemon, containerName string) (storage, error)
  - Initialize a storage interface for an already existing storage volume for a
    container on a given storage pool.
- storagePoolVolumeImageInit(d *Daemon, poolName string, imageFingerprint string) (storage, error)
  - Initialize a storage interface suitable for image handling.
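
A rough usage sketch of the initializers above (illustrative; error
handling abbreviated, pool and volume names are made up):

	// Load an existing pool and tweak a user config key.
	s, err := storagePoolInit(d, "default")
	if err != nil {
		return err
	}
	w := s.GetStoragePoolWritable()
	w.PoolConfig["user.comment"] = "my pool"
	s.SetStoragePoolWritable(&w)

	// Load a custom volume on that pool.
	v, err := storagePoolVolumeInit(d, "default", "vol1", storagePoolVolumeTypeCustom)
	if err != nil {
		return err
	}
	_ = v.GetStoragePoolVolumeWritable()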

Further changes:
All of the following methods move from storageShared to storageCore:
- initShared() error
- GetStorageType() storageType {
- GetStorageTypeName() string {
- GetStorageTypeVersion() string {

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/storage.go | 360 ++++++++++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 294 insertions(+), 66 deletions(-)

diff --git a/lxd/storage.go b/lxd/storage.go
index 91fc27b..2daadd1 100644
--- a/lxd/storage.go
+++ b/lxd/storage.go
@@ -6,7 +6,6 @@ import (
 	"io"
 	"os"
 	"os/exec"
-	"path/filepath"
 	"reflect"
 	"syscall"
 	"time"
@@ -99,6 +98,8 @@ const (
 	storageTypeMock
 )
 
+var supportedStorageTypes = []string{"btrfs", "zfs", "lvm", "dir"}
+
 func storageTypeToString(sType storageType) (string, error) {
 	switch sType {
 	case storageTypeBtrfs:
@@ -155,12 +156,38 @@ type MigrationStorageSourceDriver interface {
 	Cleanup()
 }
 
-type storage interface {
-	Init(config map[string]interface{}) (storage, error)
-
+type storageCoreInfo interface {
+	StorageCoreInit() (*storageCore, error)
 	GetStorageType() storageType
 	GetStorageTypeName() string
 	GetStorageTypeVersion() string
+}
+
+// TODO(brauner): Split up this interface into sub-interfaces that can be
+// combined into this single big interface but can also be individually
+// initialized. Suggestion:
+// - type storagePool interface
+// - type storagePoolVolume interface
+// - type storageContainer interface
+// - type storageImage interface
+type storage interface {
+	storageCoreInfo
+
+	// Functions dealing with storage pool.
+	StoragePoolInit(config map[string]interface{}) (storage, error)
+	StoragePoolCheck() error
+	StoragePoolCreate() error
+	StoragePoolDelete() error
+	StoragePoolUpdate(changedConfig []string) error
+	GetStoragePoolWritable() api.StoragePoolPut
+	SetStoragePoolWritable(writable *api.StoragePoolPut)
+
+	// Functions dealing with storage volumes.
+	StoragePoolVolumeCreate() error
+	StoragePoolVolumeDelete() error
+	StoragePoolVolumeUpdate(changedConfig []string) error
+	GetStoragePoolVolumeWritable() api.StorageVolumePut
+	SetStoragePoolVolumeWritable(writable *api.StorageVolumePut)
 
 	// ContainerCreate creates an empty container (no rootfs/metadata.yaml)
 	ContainerCreate(container container) error
@@ -177,6 +204,8 @@ type storage interface {
 	ContainerRestore(container container, sourceContainer container) error
 	ContainerSetQuota(container container, size int64) error
 	ContainerGetUsage(container container) (int64, error)
+	ContainerPoolGet() string
+	ContainerPoolIDGet() int64
 
 	ContainerSnapshotCreate(
 		snapshotContainer container, sourceContainer container) error
@@ -223,6 +252,153 @@ func newStorage(d *Daemon, sType storageType) (storage, error) {
 	return newStorageWithConfig(d, sType, nilmap)
 }
 
+func storageWrapperInit(d *Daemon, poolName string, volumeName string, volumeType int) (*storageLogWrapper, error) {
+	var s storageLogWrapper
+
+	// Load the storage pool.
+	poolID, pool, err := dbStoragePoolGet(d.db, poolName)
+	if err != nil {
+		return nil, err
+	}
+
+	driver := pool.PoolDriver
+	if driver == "" {
+		// This shouldn't actually be possible but better safe than
+		// sorry.
+		return nil, fmt.Errorf("No storage driver was provided.")
+	}
+
+	// Load the storage volume.
+	volume := &api.StorageVolume{}
+	if volumeName != "" && volumeType >= 0 {
+		_, volume, err = dbStoragePoolVolumeGetType(d.db, volumeName, volumeType, poolID)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	sType, err := storageStringToType(driver)
+	if err != nil {
+		return nil, err
+	}
+
+	switch sType {
+	case storageTypeBtrfs:
+		btrfs := storageBtrfs{}
+		btrfs.poolID = poolID
+		btrfs.pool = pool
+		btrfs.volume = volume
+		btrfs.d = d
+		s = storageLogWrapper{w: &btrfs}
+	case storageTypeZfs:
+		zfs := storageZfs{}
+		zfs.poolID = poolID
+		zfs.pool = pool
+		zfs.volume = volume
+		zfs.d = d
+		s = storageLogWrapper{w: &zfs}
+	case storageTypeLvm:
+		lvm := storageLvm{}
+		lvm.poolID = poolID
+		lvm.pool = pool
+		lvm.volume = volume
+		lvm.d = d
+		s = storageLogWrapper{w: &lvm}
+	case storageTypeDir:
+		dir := storageDir{}
+		dir.poolID = poolID
+		dir.pool = pool
+		dir.volume = volume
+		dir.d = d
+		s = storageLogWrapper{w: &dir}
+	}
+
+	return &s, nil
+}
+
+func storagePoolInit(d *Daemon, poolName string) (storage, error) {
+	var config map[string]interface{}
+
+	wrapper, err := storageWrapperInit(d, poolName, "", -1)
+	if err != nil {
+		return nil, err
+	}
+
+	storage, err := wrapper.StoragePoolInit(config)
+	if err != nil {
+		return nil, err
+	}
+
+	return storage, nil
+}
+
+func storagePoolCoreInit(poolDriver string) (*storageCore, error) {
+	sType, err := storageStringToType(poolDriver)
+	if err != nil {
+		return nil, err
+	}
+
+	var s storage
+	switch sType {
+	case storageTypeBtrfs:
+		btrfs := storageBtrfs{}
+		s = &storageLogWrapper{w: &btrfs}
+	case storageTypeZfs:
+		zfs := storageZfs{}
+		s = &storageLogWrapper{w: &zfs}
+	case storageTypeLvm:
+		lvm := storageLvm{}
+		s = &storageLogWrapper{w: &lvm}
+	case storageTypeDir:
+		dir := storageDir{}
+		s = &storageLogWrapper{w: &dir}
+	default:
+		return nil, fmt.Errorf("Unknown storage pool driver \"%s\".", poolDriver)
+	}
+
+	return s.StorageCoreInit()
+}
+
+func storagePoolVolumeImageInit(d *Daemon, poolName string, imageFingerprint string) (storage, error) {
+	return storagePoolVolumeInit(d, poolName, imageFingerprint, storagePoolVolumeTypeImage)
+}
+
+func storagePoolVolumeContainerCreateInit(d *Daemon, poolName string, containerName string) (storage, error) {
+	return storagePoolVolumeInit(d, poolName, containerName, storagePoolVolumeTypeContainer)
+}
+
+func storagePoolVolumeContainerLoadInit(d *Daemon, containerName string) (storage, error) {
+	// Get the storage pool of a given container.
+	poolName, err := dbContainerPool(d.db, containerName)
+	if err != nil {
+		return nil, err
+	}
+
+	return storagePoolVolumeInit(d, poolName, containerName, storagePoolVolumeTypeContainer)
+}
+
+func storagePoolVolumeInit(d *Daemon, poolName string, volumeName string, volumeType int) (storage, error) {
+	var config map[string]interface{}
+
+	// No need to detect storage here, it's a new container.
+	wrapper, err := storageWrapperInit(d, poolName, volumeName, volumeType)
+	if err != nil {
+		return nil, err
+	}
+
+	storage, err := wrapper.StoragePoolInit(config)
+	if err != nil {
+		return nil, err
+	}
+
+	err = storage.StoragePoolCheck()
+	if err != nil {
+		return nil, err
+	}
+
+	return storage, nil
+}
+
 func newStorageWithConfig(d *Daemon, sType storageType, config map[string]interface{}) (storage, error) {
 	if d.MockMode {
 		return d.Storage, nil
@@ -236,102 +412,102 @@ func newStorageWithConfig(d *Daemon, sType storageType, config map[string]interf
 			return d.Storage, nil
 		}
 
-		s = &storageLogWrapper{w: &storageBtrfs{d: d}}
+		btrfs := storageBtrfs{}
+		btrfs.pool = &api.StoragePool{}
+		btrfs.d = d
+		s = &storageLogWrapper{w: &btrfs}
 	case storageTypeZfs:
 		if d.Storage != nil && d.Storage.GetStorageType() == storageTypeZfs {
 			return d.Storage, nil
 		}
 
-		s = &storageLogWrapper{w: &storageZfs{d: d}}
+		zfs := storageZfs{}
+		zfs.pool = &api.StoragePool{}
+		zfs.d = d
+		s = &storageLogWrapper{w: &zfs}
 	case storageTypeLvm:
 		if d.Storage != nil && d.Storage.GetStorageType() == storageTypeLvm {
 			return d.Storage, nil
 		}
 
-		s = &storageLogWrapper{w: &storageLvm{d: d}}
+		lvm := storageLvm{}
+		lvm.pool = &api.StoragePool{}
+		lvm.d = d
+		s = &storageLogWrapper{w: &lvm}
 	default:
 		if d.Storage != nil && d.Storage.GetStorageType() == storageTypeDir {
 			return d.Storage, nil
 		}
 
-		s = &storageLogWrapper{w: &storageDir{d: d}}
+		dir := storageDir{}
+		dir.pool = &api.StoragePool{}
+		dir.d = d
+		s = &storageLogWrapper{w: &dir}
 	}
 
-	return s.Init(config)
-}
-
-func storageForFilename(d *Daemon, filename string) (storage, error) {
-	var filesystem string
-	var err error
-
-	config := make(map[string]interface{})
-	storageType := storageTypeDir
-
-	if d.MockMode {
-		return newStorageWithConfig(d, storageTypeMock, config)
-	}
-
-	if shared.PathExists(filename) {
-		filesystem, err = filesystemDetect(filename)
-		if err != nil {
-			return nil, fmt.Errorf("couldn't detect filesystem for '%s': %v", filename, err)
-		}
-
-		if filesystem == "btrfs" {
-			if !(*storageBtrfs).isSubvolume(nil, filename) {
-				filesystem = ""
-			}
-		}
+	storage, err := s.StoragePoolInit(config)
+	if err != nil {
+		return nil, err
 	}
 
-	if shared.PathExists(filename + ".lv") {
-		storageType = storageTypeLvm
-		lvPath, err := os.Readlink(filename + ".lv")
-		if err != nil {
-			return nil, fmt.Errorf("couldn't read link dest for '%s': %v", filename+".lv", err)
-		}
-		vgname := filepath.Base(filepath.Dir(lvPath))
-		config["vgName"] = vgname
-	} else if shared.PathExists(filename + ".zfs") {
-		storageType = storageTypeZfs
-	} else if shared.PathExists(filename+".btrfs") || filesystem == "btrfs" {
-		storageType = storageTypeBtrfs
+	err = s.StoragePoolCheck()
+	if err != nil {
+		return nil, err
 	}
 
-	return newStorageWithConfig(d, storageType, config)
-}
-
-func storageForImage(d *Daemon, imgInfo *api.Image) (storage, error) {
-	imageFilename := shared.VarPath("images", imgInfo.Fingerprint)
-	return storageForFilename(d, imageFilename)
+	return storage, nil
 }
 
-type storageShared struct {
+type storageCore struct {
 	sType        storageType
 	sTypeName    string
 	sTypeVersion string
-
-	log shared.Logger
+	log          shared.Logger
 }
 
-func (ss *storageShared) initShared() error {
-	ss.log = logging.AddContext(
+func (sc *storageCore) initShared() error {
+	sc.log = logging.AddContext(
 		shared.Log,
-		log.Ctx{"driver": fmt.Sprintf("storage/%s", ss.sTypeName)},
+		log.Ctx{"driver": fmt.Sprintf("storage/%s", sc.sTypeName)},
 	)
 	return nil
 }
 
-func (ss *storageShared) GetStorageType() storageType {
-	return ss.sType
+// Return a storageCore struct that implements the storageCoreInfo interface.
+// This minimal interface only allows retrieving basic information about the
+// storage type in question.
+func (lw *storageLogWrapper) StorageCoreInit() (*storageCore, error) {
+	sCore, err := lw.w.StorageCoreInit()
+	lw.log = logging.AddContext(
+		shared.Log,
+		log.Ctx{"driver": fmt.Sprintf("storage/%s", sCore.GetStorageTypeName())},
+	)
+
+	lw.log.Debug("StorageCoreInit")
+	return sCore, err
+}
+
+func (sc *storageCore) GetStorageType() storageType {
+	return sc.sType
+}
+
+func (sc *storageCore) GetStorageTypeName() string {
+	return sc.sTypeName
 }
 
-func (ss *storageShared) GetStorageTypeName() string {
-	return ss.sTypeName
+func (sc *storageCore) GetStorageTypeVersion() string {
+	return sc.sTypeVersion
 }
 
-func (ss *storageShared) GetStorageTypeVersion() string {
-	return ss.sTypeVersion
+type storageShared struct {
+	storageCore
+
+	d *Daemon
+
+	poolID int64
+	pool   *api.StoragePool
+
+	volume *api.StorageVolume
 }
 
 func (ss *storageShared) shiftRootfs(c container) error {
@@ -394,17 +570,21 @@ type storageLogWrapper struct {
 	log shared.Logger
 }
 
-func (lw *storageLogWrapper) Init(config map[string]interface{}) (storage, error) {
-	_, err := lw.w.Init(config)
+func (lw *storageLogWrapper) StoragePoolInit(config map[string]interface{}) (storage, error) {
+	_, err := lw.w.StoragePoolInit(config)
 	lw.log = logging.AddContext(
 		shared.Log,
 		log.Ctx{"driver": fmt.Sprintf("storage/%s", lw.w.GetStorageTypeName())},
 	)
 
-	lw.log.Debug("Init")
+	lw.log.Debug("StoragePoolInit")
 	return lw, err
 }
 
+func (lw *storageLogWrapper) StoragePoolCheck() error {
+	return lw.w.StoragePoolCheck()
+}
+
 func (lw *storageLogWrapper) GetStorageType() storageType {
 	return lw.w.GetStorageType()
 }
@@ -417,6 +597,54 @@ func (lw *storageLogWrapper) GetStorageTypeVersion() string {
 	return lw.w.GetStorageTypeVersion()
 }
 
+func (lw *storageLogWrapper) StoragePoolCreate() error {
+	return lw.w.StoragePoolCreate()
+}
+
+func (lw *storageLogWrapper) StoragePoolVolumeCreate() error {
+	return lw.w.StoragePoolVolumeCreate()
+}
+
+func (lw *storageLogWrapper) StoragePoolVolumeDelete() error {
+	return lw.w.StoragePoolVolumeDelete()
+}
+
+func (lw *storageLogWrapper) StoragePoolDelete() error {
+	return lw.w.StoragePoolDelete()
+}
+
+func (lw *storageLogWrapper) StoragePoolUpdate(changedConfig []string) error {
+	return lw.w.StoragePoolUpdate(changedConfig)
+}
+
+func (lw *storageLogWrapper) StoragePoolVolumeUpdate(changedConfig []string) error {
+	return lw.w.StoragePoolVolumeUpdate(changedConfig)
+}
+
+func (lw *storageLogWrapper) GetStoragePoolWritable() api.StoragePoolPut {
+	return lw.w.GetStoragePoolWritable()
+}
+
+func (lw *storageLogWrapper) GetStoragePoolVolumeWritable() api.StorageVolumePut {
+	return lw.w.GetStoragePoolVolumeWritable()
+}
+
+func (lw *storageLogWrapper) SetStoragePoolWritable(writable *api.StoragePoolPut) {
+	lw.w.SetStoragePoolWritable(writable)
+}
+
+func (lw *storageLogWrapper) SetStoragePoolVolumeWritable(writable *api.StorageVolumePut) {
+	lw.w.SetStoragePoolVolumeWritable(writable)
+}
+
+func (lw *storageLogWrapper) ContainerPoolGet() string {
+	return lw.w.ContainerPoolGet()
+}
+
+func (lw *storageLogWrapper) ContainerPoolIDGet() int64 {
+	return lw.w.ContainerPoolIDGet()
+}
+
 func (lw *storageLogWrapper) ContainerCreate(container container) error {
 	lw.log.Debug(
 		"ContainerCreate",

From ddee0ff2a18c343b20623f5fa99189eee4c9dbd7 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Wed, 11 Jan 2017 12:24:59 +0100
Subject: [PATCH 10/63] lxd/storage_btrfs: implement new storage functions

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/storage_btrfs.go | 200 ++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 135 insertions(+), 65 deletions(-)

diff --git a/lxd/storage_btrfs.go b/lxd/storage_btrfs.go
index 2b9f2ab..0119c49 100644
--- a/lxd/storage_btrfs.go
+++ b/lxd/storage_btrfs.go
@@ -17,41 +17,110 @@ import (
 	"github.com/pborman/uuid"
 
 	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/api"
 
 	log "gopkg.in/inconshreveable/log15.v2"
 )
 
 type storageBtrfs struct {
-	d *Daemon
-
 	storageShared
 }
 
-func (s *storageBtrfs) Init(config map[string]interface{}) (storage, error) {
-	s.sType = storageTypeBtrfs
-	s.sTypeName = storageTypeToString(s.sType)
-	if err := s.initShared(); err != nil {
-		return s, err
+func (s *storageBtrfs) StorageCoreInit() (*storageCore, error) {
+	sCore := storageCore{}
+	sCore.sType = storageTypeBtrfs
+	typeName, err := storageTypeToString(sCore.sType)
+	if err != nil {
+		return nil, err
 	}
+	sCore.sTypeName = typeName
 
 	out, err := exec.LookPath("btrfs")
 	if err != nil || len(out) == 0 {
-		return s, fmt.Errorf("The 'btrfs' tool isn't available")
+		return nil, fmt.Errorf("The 'btrfs' tool isn't available")
 	}
 
 	output, err := exec.Command("btrfs", "version").CombinedOutput()
 	if err != nil {
-		return s, fmt.Errorf("The 'btrfs' tool isn't working properly")
+		return nil, fmt.Errorf("The 'btrfs' tool isn't working properly")
 	}
 
-	count, err := fmt.Sscanf(strings.SplitN(string(output), " ", 2)[1], "v%s\n", &s.sTypeVersion)
+	count, err := fmt.Sscanf(strings.SplitN(string(output), " ", 2)[1], "v%s\n", &sCore.sTypeVersion)
 	if err != nil || count != 1 {
-		return s, fmt.Errorf("The 'btrfs' tool isn't working properly")
+		return nil, fmt.Errorf("The 'btrfs' tool isn't working properly")
+	}
+
+	err = sCore.initShared()
+	if err != nil {
+		return nil, err
+	}
+
+	s.storageCore = sCore
+
+	return &sCore, nil
+}
+
+func (s *storageBtrfs) StoragePoolInit(config map[string]interface{}) (storage, error) {
+	_, err := s.StorageCoreInit()
+	if err != nil {
+		return s, err
 	}
 
 	return s, nil
 }
 
+func (s *storageBtrfs) StoragePoolCheck() error {
+	return nil
+}
+
+func (s *storageBtrfs) StoragePoolCreate() error {
+	return nil
+}
+
+func (s *storageBtrfs) StoragePoolVolumeCreate() error {
+	return nil
+}
+
+func (s *storageBtrfs) StoragePoolDelete() error {
+	return nil
+}
+
+func (s *storageBtrfs) StoragePoolVolumeDelete() error {
+	return nil
+}
+
+func (s *storageBtrfs) GetStoragePoolWritable() api.StoragePoolPut {
+	return s.pool.Writable()
+}
+
+func (s *storageBtrfs) GetStoragePoolVolumeWritable() api.StorageVolumePut {
+	return s.volume.Writable()
+}
+
+func (s *storageBtrfs) SetStoragePoolWritable(writable *api.StoragePoolPut) {
+	s.pool.StoragePoolPut = *writable
+}
+
+func (s *storageBtrfs) SetStoragePoolVolumeWritable(writable *api.StorageVolumePut) {
+	s.volume.StorageVolumePut = *writable
+}
+
+func (s *storageBtrfs) ContainerPoolGet() string {
+	return s.pool.PoolName
+}
+
+func (s *storageBtrfs) ContainerPoolIDGet() int64 {
+	return s.poolID
+}
+
+func (s *storageBtrfs) StoragePoolUpdate(changedConfig []string) error {
+	return nil
+}
+
+func (s *storageBtrfs) StoragePoolVolumeUpdate(changedConfig []string) error {
+	return nil
+}
+
 func (s *storageBtrfs) ContainerCreate(container container) error {
 	cPath := container.Path()
 
@@ -61,7 +130,7 @@ func (s *storageBtrfs) ContainerCreate(container container) error {
 	}
 
 	// Create the BTRFS Subvolume
-	err := s.subvolCreate(cPath)
+	err := s.btrfsPoolVolumeCreate(cPath)
 	if err != nil {
 		return err
 	}
@@ -90,7 +159,7 @@ func (s *storageBtrfs) ContainerCreateFromImage(
 	}
 
 	// Now make a snapshot of the image subvol
-	err := s.subvolsSnapshot(imageSubvol, container.Path(), false)
+	err := s.btrfsPoolVolumesSnapshot(imageSubvol, container.Path(), false)
 	if err != nil {
 		return err
 	}
@@ -117,8 +186,8 @@ func (s *storageBtrfs) ContainerDelete(container container) error {
 	cPath := container.Path()
 
 	// First remove the subvol (if it was one).
-	if s.isSubvolume(cPath) {
-		if err := s.subvolsDelete(cPath); err != nil {
+	if s.isBtrfsPoolVolume(cPath) {
+		if err := s.btrfsPoolVolumesDelete(cPath); err != nil {
 			return err
 		}
 	}
@@ -139,9 +208,9 @@ func (s *storageBtrfs) ContainerCopy(container container, sourceContainer contai
 	subvol := sourceContainer.Path()
 	dpath := container.Path()
 
-	if s.isSubvolume(subvol) {
+	if s.isBtrfsPoolVolume(subvol) {
 		// Snapshot the sourcecontainer
-		err := s.subvolsSnapshot(subvol, dpath, false)
+		err := s.btrfsPoolVolumesSnapshot(subvol, dpath, false)
 		if err != nil {
 			return err
 		}
@@ -214,15 +283,15 @@ func (s *storageBtrfs) ContainerRestore(
 	}
 
 	var failure error
-	if s.isSubvolume(sourceSubVol) {
+	if s.isBtrfsPoolVolume(sourceSubVol) {
 		// Restore using btrfs snapshots.
-		err := s.subvolsSnapshot(sourceSubVol, targetSubVol, false)
+		err := s.btrfsPoolVolumesSnapshot(sourceSubVol, targetSubVol, false)
 		if err != nil {
 			failure = err
 		}
 	} else {
 		// Restore using rsync but create a btrfs subvol.
-		if err := s.subvolCreate(targetSubVol); err == nil {
+		if err := s.btrfsPoolVolumeCreate(targetSubVol); err == nil {
 			output, err := storageRsyncCopy(
 				sourceSubVol,
 				targetSubVol)
@@ -250,8 +319,8 @@ func (s *storageBtrfs) ContainerRestore(
 		os.Rename(sourceBackupPath, container.Path())
 	} else {
 		// Remove the backup, we made
-		if s.isSubvolume(sourceBackupPath) {
-			return s.subvolsDelete(sourceBackupPath)
+		if s.isBtrfsPoolVolume(sourceBackupPath) {
+			return s.btrfsPoolVolumesDelete(sourceBackupPath)
 		}
 		os.RemoveAll(sourceBackupPath)
 	}
@@ -262,7 +331,7 @@ func (s *storageBtrfs) ContainerRestore(
 func (s *storageBtrfs) ContainerSetQuota(container container, size int64) error {
 	subvol := container.Path()
 
-	_, err := s.subvolQGroup(subvol)
+	_, err := s.btrfsPoolVolumeQGroup(subvol)
 	if err != nil {
 		return err
 	}
@@ -282,7 +351,7 @@ func (s *storageBtrfs) ContainerSetQuota(container container, size int64) error
 }
 
 func (s *storageBtrfs) ContainerGetUsage(container container) (int64, error) {
-	return s.subvolQGroupUsage(container.Path())
+	return s.btrfsPoolVolumeQGroupUsage(container.Path())
 }
 
 func (s *storageBtrfs) ContainerSnapshotCreate(
@@ -291,9 +360,9 @@ func (s *storageBtrfs) ContainerSnapshotCreate(
 	subvol := sourceContainer.Path()
 	dpath := snapshotContainer.Path()
 
-	if s.isSubvolume(subvol) {
+	if s.isBtrfsPoolVolume(subvol) {
 		// Create a readonly snapshot of the source.
-		err := s.subvolsSnapshot(subvol, dpath, true)
+		err := s.btrfsPoolVolumesSnapshot(subvol, dpath, true)
 		if err != nil {
 			s.ContainerSnapshotDelete(snapshotContainer)
 			return err
@@ -317,6 +386,7 @@ func (s *storageBtrfs) ContainerSnapshotCreate(
 
 	return nil
 }
+
 func (s *storageBtrfs) ContainerSnapshotDelete(
 	snapshotContainer container) error {
 
@@ -342,7 +412,7 @@ func (s *storageBtrfs) ContainerSnapshotStart(container container) error {
 		return err
 	}
 
-	err = s.subvolsSnapshot(container.Path()+".ro", container.Path(), false)
+	err = s.btrfsPoolVolumesSnapshot(container.Path()+".ro", container.Path(), false)
 	if err != nil {
 		return err
 	}
@@ -355,7 +425,7 @@ func (s *storageBtrfs) ContainerSnapshotStop(container container) error {
 		return fmt.Errorf("The snapshot isn't currently mounted read-write.")
 	}
 
-	err := s.subvolsDelete(container.Path())
+	err := s.btrfsPoolVolumesDelete(container.Path())
 	if err != nil {
 		return err
 	}
@@ -381,15 +451,15 @@ func (s *storageBtrfs) ContainerSnapshotRename(
 	}
 
 	// Now rename the snapshot.
-	if !s.isSubvolume(oldPath) {
+	if !s.isBtrfsPoolVolume(oldPath) {
 		if err := os.Rename(oldPath, newPath); err != nil {
 			return err
 		}
 	} else {
-		if err := s.subvolsSnapshot(oldPath, newPath, true); err != nil {
+		if err := s.btrfsPoolVolumesSnapshot(oldPath, newPath, true); err != nil {
 			return err
 		}
-		if err := s.subvolsDelete(oldPath); err != nil {
+		if err := s.btrfsPoolVolumesDelete(oldPath); err != nil {
 			return err
 		}
 	}
@@ -404,19 +474,19 @@ func (s *storageBtrfs) ContainerSnapshotRename(
 
 func (s *storageBtrfs) ContainerSnapshotCreateEmpty(snapshotContainer container) error {
 	dpath := snapshotContainer.Path()
-	return s.subvolCreate(dpath)
+	return s.btrfsPoolVolumeCreate(dpath)
 }
 
 func (s *storageBtrfs) ImageCreate(fingerprint string) error {
 	imagePath := shared.VarPath("images", fingerprint)
 	subvol := fmt.Sprintf("%s.btrfs", imagePath)
 
-	if err := s.subvolCreate(subvol); err != nil {
+	if err := s.btrfsPoolVolumeCreate(subvol); err != nil {
 		return err
 	}
 
 	if err := unpackImage(s.d, imagePath, subvol); err != nil {
-		s.subvolDelete(subvol)
+		s.btrfsPoolVolumeDelete(subvol)
 		return err
 	}
 
@@ -427,8 +497,8 @@ func (s *storageBtrfs) ImageDelete(fingerprint string) error {
 	imagePath := shared.VarPath("images", fingerprint)
 	subvol := fmt.Sprintf("%s.btrfs", imagePath)
 
-	if s.isSubvolume(subvol) {
-		if err := s.subvolsDelete(subvol); err != nil {
+	if s.isBtrfsPoolVolume(subvol) {
+		if err := s.btrfsPoolVolumesDelete(subvol); err != nil {
 			return err
 		}
 	}
@@ -436,7 +506,7 @@ func (s *storageBtrfs) ImageDelete(fingerprint string) error {
 	return nil
 }
 
-func (s *storageBtrfs) subvolCreate(subvol string) error {
+func (s *storageBtrfs) btrfsPoolVolumeCreate(subvol string) error {
 	parentDestPath := filepath.Dir(subvol)
 	if !shared.PathExists(parentDestPath) {
 		if err := os.MkdirAll(parentDestPath, 0700); err != nil {
@@ -464,7 +534,7 @@ func (s *storageBtrfs) subvolCreate(subvol string) error {
 	return nil
 }
 
-func (s *storageBtrfs) subvolQGroup(subvol string) (string, error) {
+func (s *storageBtrfs) btrfsPoolVolumeQGroup(subvol string) (string, error) {
 	output, err := exec.Command(
 		"btrfs",
 		"qgroup",
@@ -498,7 +568,7 @@ func (s *storageBtrfs) subvolQGroup(subvol string) (string, error) {
 	return qgroup, nil
 }
 
-func (s *storageBtrfs) subvolQGroupUsage(subvol string) (int64, error) {
+func (s *storageBtrfs) btrfsPoolVolumeQGroupUsage(subvol string) (int64, error) {
 	output, err := exec.Command(
 		"btrfs",
 		"qgroup",
@@ -532,9 +602,9 @@ func (s *storageBtrfs) subvolQGroupUsage(subvol string) (int64, error) {
 	return -1, fmt.Errorf("Unable to find current qgroup usage")
 }
 
-func (s *storageBtrfs) subvolDelete(subvol string) error {
+func (s *storageBtrfs) btrfsPoolVolumeDelete(subvol string) error {
 	// Attempt to delete any qgroup on the subvolume (but don't fail on error)
-	qgroup, err := s.subvolQGroup(subvol)
+	qgroup, err := s.btrfsPoolVolumeQGroup(subvol)
 	if err == nil {
 		output, err := exec.Command(
 			"btrfs",
@@ -568,12 +638,12 @@ func (s *storageBtrfs) subvolDelete(subvol string) error {
 	return nil
 }
 
-// subvolsDelete is the recursive variant on subvolDelete,
+// btrfsPoolVolumesDelete is the recursive variant of btrfsPoolVolumeDelete:
 // it first deletes subvolumes of the subvolume and then the
 // subvolume itself.
-func (s *storageBtrfs) subvolsDelete(subvol string) error {
+func (s *storageBtrfs) btrfsPoolVolumesDelete(subvol string) error {
 	// Delete subsubvols.
-	subsubvols, err := s.getSubVolumes(subvol)
+	subsubvols, err := s.btrfsPoolVolumesGet(subvol)
 	if err != nil {
 		return err
 	}
@@ -585,13 +655,13 @@ func (s *storageBtrfs) subvolsDelete(subvol string) error {
 				"subvol":    subvol,
 				"subsubvol": subsubvol})
 
-		if err := s.subvolDelete(path.Join(subvol, subsubvol)); err != nil {
+		if err := s.btrfsPoolVolumeDelete(path.Join(subvol, subsubvol)); err != nil {
 			return err
 		}
 	}
 
 	// Delete the subvol itself
-	if err := s.subvolDelete(subvol); err != nil {
+	if err := s.btrfsPoolVolumeDelete(subvol); err != nil {
 		return err
 	}
 
@@ -599,10 +669,10 @@ func (s *storageBtrfs) subvolsDelete(subvol string) error {
 }
 
 /*
- * subvolSnapshot creates a snapshot of "source" to "dest"
+ * btrfsPoolVolumeSnapshot creates a snapshot of "source" at "dest";
  * the result will be read-only if "readonly" is true.
  */
-func (s *storageBtrfs) subvolSnapshot(
+func (s *storageBtrfs) btrfsPoolVolumeSnapshot(
 	source string, dest string, readonly bool) error {
 
 	parentDestPath := filepath.Dir(dest)
@@ -652,11 +722,11 @@ func (s *storageBtrfs) subvolSnapshot(
 	return err
 }
 
-func (s *storageBtrfs) subvolsSnapshot(
+func (s *storageBtrfs) btrfsPoolVolumesSnapshot(
 	source string, dest string, readonly bool) error {
 
 	// Get a list of subvolumes of the root
-	subsubvols, err := s.getSubVolumes(source)
+	subsubvols, err := s.btrfsPoolVolumesGet(source)
 	if err != nil {
 		return err
 	}
@@ -672,13 +742,13 @@ func (s *storageBtrfs) subvolsSnapshot(
 	}
 
 	// First snapshot the root
-	if err := s.subvolSnapshot(source, dest, readonly); err != nil {
+	if err := s.btrfsPoolVolumeSnapshot(source, dest, readonly); err != nil {
 		return err
 	}
 
 	// Now snapshot all subvolumes of the root.
 	for _, subsubvol := range subsubvols {
-		if err := s.subvolSnapshot(
+		if err := s.btrfsPoolVolumeSnapshot(
 			path.Join(source, subsubvol),
 			path.Join(dest, subsubvol),
 			readonly); err != nil {
@@ -691,10 +761,10 @@ func (s *storageBtrfs) subvolsSnapshot(
 }
 
 /*
- * isSubvolume returns true if the given Path is a btrfs subvolume
+ * isBtrfsPoolVolume returns true if the given path is a btrfs subvolume
  * and false otherwise.
  */
-func (s *storageBtrfs) isSubvolume(subvolPath string) bool {
+func (s *storageBtrfs) isBtrfsPoolVolume(subvolPath string) bool {
 	fs := syscall.Stat_t{}
 	err := syscall.Lstat(subvolPath, &fs)
 	if err != nil {
@@ -710,7 +780,7 @@ func (s *storageBtrfs) isSubvolume(subvolPath string) bool {
 }
 
-// getSubVolumes returns a list of relative subvolume paths of "path".
+// btrfsPoolVolumesGet returns a list of relative subvolume paths of "path".
-func (s *storageBtrfs) getSubVolumes(path string) ([]string, error) {
+func (s *storageBtrfs) btrfsPoolVolumesGet(path string) ([]string, error) {
 	result := []string{}
 
 	if !strings.HasSuffix(path, "/") {
@@ -735,7 +805,7 @@ func (s *storageBtrfs) getSubVolumes(path string) ([]string, error) {
 		}
 
 		// Check if a btrfs subvolume
-		if s.isSubvolume(fpath) {
+		if s.isBtrfsPoolVolume(fpath) {
 			result = append(result, strings.TrimPrefix(fpath, path))
 		}
 
@@ -810,11 +880,11 @@ func (s *btrfsMigrationSourceDriver) SendWhileRunning(conn *websocket.Conn, op *
 		}
 
 		btrfsPath := fmt.Sprintf("%s/.root", tmpPath)
-		if err := s.btrfs.subvolSnapshot(s.container.Path(), btrfsPath, true); err != nil {
+		if err := s.btrfs.btrfsPoolVolumeSnapshot(s.container.Path(), btrfsPath, true); err != nil {
 			return err
 		}
 
-		defer s.btrfs.subvolDelete(btrfsPath)
+		defer s.btrfs.btrfsPoolVolumeDelete(btrfsPath)
 
 		wrapper := StorageProgressReader(op, "fs_progress", s.container.Name())
 		return s.send(conn, btrfsPath, "", wrapper)
@@ -842,7 +912,7 @@ func (s *btrfsMigrationSourceDriver) SendWhileRunning(conn *websocket.Conn, op *
 	}
 
 	s.runningSnapName = fmt.Sprintf("%s/.root", tmpPath)
-	if err := s.btrfs.subvolSnapshot(s.container.Path(), s.runningSnapName, true); err != nil {
+	if err := s.btrfs.btrfsPoolVolumeSnapshot(s.container.Path(), s.runningSnapName, true); err != nil {
 		return err
 	}
 
@@ -863,7 +933,7 @@ func (s *btrfsMigrationSourceDriver) SendAfterCheckpoint(conn *websocket.Conn) e
 	}
 
 	s.stoppedSnapName = fmt.Sprintf("%s/.root", tmpPath)
-	if err := s.btrfs.subvolSnapshot(s.container.Path(), s.stoppedSnapName, true); err != nil {
+	if err := s.btrfs.btrfsPoolVolumeSnapshot(s.container.Path(), s.stoppedSnapName, true); err != nil {
 		return err
 	}
 
@@ -872,11 +942,11 @@ func (s *btrfsMigrationSourceDriver) SendAfterCheckpoint(conn *websocket.Conn) e
 
 func (s *btrfsMigrationSourceDriver) Cleanup() {
 	if s.stoppedSnapName != "" {
-		s.btrfs.subvolDelete(s.stoppedSnapName)
+		s.btrfs.btrfsPoolVolumeDelete(s.stoppedSnapName)
 	}
 
 	if s.runningSnapName != "" {
-		s.btrfs.subvolDelete(s.runningSnapName)
+		s.btrfs.btrfsPoolVolumeDelete(s.runningSnapName)
 	}
 }
 
@@ -945,7 +1015,7 @@ func (s *storageBtrfs) MigrationSink(live bool, container container, snapshots [
 		cmd := exec.Command("btrfs", args...)
 
 		// Remove the existing pre-created subvolume
-		err := s.subvolsDelete(targetPath)
+		err := s.btrfsPoolVolumesDelete(targetPath)
 		if err != nil {
 			return err
 		}
@@ -985,13 +1055,13 @@ func (s *storageBtrfs) MigrationSink(live bool, container container, snapshots [
 		if !isSnapshot {
 			cPath := containerPath(fmt.Sprintf("%s/.root", cName), true)
 
-			err := s.subvolSnapshot(cPath, targetPath, false)
+			err := s.btrfsPoolVolumeSnapshot(cPath, targetPath, false)
 			if err != nil {
 				shared.LogError("problem with btrfs snapshot", log.Ctx{"err": err})
 				return err
 			}
 
-			err = s.subvolsDelete(cPath)
+			err = s.btrfsPoolVolumesDelete(cPath)
 			if err != nil {
 				shared.LogError("problem with btrfs delete", log.Ctx{"err": err})
 				return err

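A note on the detection trick relied on above: on btrfs, the root inode of
every subvolume has inode number 256, and that single check is the whole of
isBtrfsPoolVolume. A minimal standalone sketch of the same check (Linux-only,
not part of the patch):

package main

import (
	"fmt"
	"os"
	"syscall"
)

// isBtrfsSubvolume mirrors the check done by isBtrfsPoolVolume: on btrfs,
// the root inode of a subvolume always has inode number 256.
func isBtrfsSubvolume(subvolPath string) bool {
	fs := syscall.Stat_t{}
	if err := syscall.Lstat(subvolPath, &fs); err != nil {
		return false
	}
	return fs.Ino == 256
}

func main() {
	if len(os.Args) < 2 {
		fmt.Println("usage: issubvol <path>")
		return
	}
	fmt.Println(isBtrfsSubvolume(os.Args[1]))
}

Run against a container path on a btrfs-backed host this prints true; run
against a plain directory it prints false.
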
From 640b11b181352eac5397be9de741b9e86a0a75ae Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner@ubuntu.com>
Date: Wed, 11 Jan 2017 12:25:47 +0100
Subject: [PATCH 11/63] lxd/storage_dir: implement new storage functions

Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
---
 lxd/storage_dir.go | 85 ++++++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 79 insertions(+), 6 deletions(-)

diff --git a/lxd/storage_dir.go b/lxd/storage_dir.go
index 5adcd45..936e357 100644
--- a/lxd/storage_dir.go
+++ b/lxd/storage_dir.go
@@ -10,26 +10,99 @@ import (
 	"github.com/gorilla/websocket"
 
 	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/api"
 
 	log "gopkg.in/inconshreveable/log15.v2"
 )
 
 type storageDir struct {
-	d *Daemon
-
 	storageShared
 }
 
-func (s *storageDir) Init(config map[string]interface{}) (storage, error) {
-	s.sType = storageTypeDir
-	s.sTypeName = storageTypeToString(s.sType)
-	if err := s.initShared(); err != nil {
+// Only initialize the minimal information we need about a given storage type.
+func (s *storageDir) StorageCoreInit() (*storageCore, error) {
+	sCore := storageCore{}
+	sCore.sType = storageTypeDir
+	typeName, err := storageTypeToString(sCore.sType)
+	if err != nil {
+		return nil, err
+	}
+	sCore.sTypeName = typeName
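+	// The dir backend has no external tool to query, so the version is a constant.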
+	sCore.sTypeVersion = "1"
+
+	err = sCore.initShared()
+	if err != nil {
+		return nil, err
+	}
+
+	s.storageCore = sCore
+
+	return &sCore, nil
+}
+
+// Initialize a full storage interface.
+func (s *storageDir) StoragePoolInit(config map[string]interface{}) (storage, error) {
+	_, err := s.StorageCoreInit()
+	if err != nil {
 		return s, err
 	}
 
 	return s, nil
 }
 
+// Check that the storage pool is in a usable state.
+func (s *storageDir) StoragePoolCheck() error {
+	return nil
+}
+
+func (s *storageDir) StoragePoolCreate() error {
+	return nil
+}
+
+func (s *storageDir) StoragePoolVolumeCreate() error {
+	return nil
+}
+
+func (s *storageDir) StoragePoolDelete() error {
+	return nil
+}
+
+func (s *storageDir) StoragePoolVolumeDelete() error {
+	return nil
+}
+
+func (s *storageDir) GetStoragePoolWritable() api.StoragePoolPut {
+	return s.pool.Writable()
+}
+
+func (s *storageDir) GetStoragePoolVolumeWritable() api.StorageVolumePut {
+	return s.volume.Writable()
+}
+
+func (s *storageDir) SetStoragePoolWritable(writable *api.StoragePoolPut) {
+	s.pool.StoragePoolPut = *writable
+}
+
+func (s *storageDir) SetStoragePoolVolumeWritable(writable *api.StorageVolumePut) {
+	s.volume.StorageVolumePut = *writable
+}
+
+func (s *storageDir) ContainerPoolGet() string {
+	return s.pool.PoolName
+}
+
+func (s *storageDir) ContainerPoolIDGet() int64 {
+	return s.poolID
+}
+
+func (s *storageDir) StoragePoolUpdate(changedConfig []string) error {
+	return nil
+}
+
+func (s *storageDir) StoragePoolVolumeUpdate(changedConfig []string) error {
+	return nil
+}
+
 func (s *storageDir) ContainerCreate(container container) error {
 	cPath := container.Path()
 	if err := os.MkdirAll(cPath, 0755); err != nil {

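The shape above repeats for every backend in this series: StorageCoreInit
fills in the cheap type/name/version metadata, and StoragePoolInit performs
the full setup, always calling StorageCoreInit first. A self-contained sketch
of that split, using simplified stand-in types rather than LXD's actual
definitions:

package main

import "fmt"

type storageCore struct {
	sTypeName    string
	sTypeVersion string
}

type storageDir struct {
	storageCore
}

// StorageCoreInit only fills in cheap metadata about the storage type.
func (s *storageDir) StorageCoreInit() (*storageCore, error) {
	core := storageCore{sTypeName: "dir", sTypeVersion: "1"}
	s.storageCore = core
	return &core, nil
}

// StoragePoolInit performs full initialization, starting from the core init.
func (s *storageDir) StoragePoolInit(config map[string]interface{}) error {
	if _, err := s.StorageCoreInit(); err != nil {
		return err
	}
	// Driver-specific pool setup would follow here.
	return nil
}

func main() {
	s := &storageDir{}
	if err := s.StoragePoolInit(nil); err != nil {
		fmt.Println("init failed:", err)
		return
	}
	fmt.Printf("driver=%s version=%s\n", s.sTypeName, s.sTypeVersion)
}
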
From a0daa5fcfe66d3085f16f3599299968926b88ab9 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner@ubuntu.com>
Date: Wed, 11 Jan 2017 12:26:17 +0100
Subject: [PATCH 12/63] lxd/storage_lvm: implement new storage functions

Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
---
 lxd/storage_lvm.go | 127 ++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 91 insertions(+), 36 deletions(-)

diff --git a/lxd/storage_lvm.go b/lxd/storage_lvm.go
index a3bc02f..0bc9b71 100644
--- a/lxd/storage_lvm.go
+++ b/lxd/storage_lvm.go
@@ -13,6 +13,7 @@ import (
 	"github.com/gorilla/websocket"
 
 	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/api"
 
 	log "gopkg.in/inconshreveable/log15.v2"
 )
@@ -155,18 +156,18 @@ func containerNameToLVName(containerName string) string {
 }
 
 type storageLvm struct {
-	d      *Daemon
-	vgName string
-
 	storageShared
 }
 
-func (s *storageLvm) Init(config map[string]interface{}) (storage, error) {
-	s.sType = storageTypeLvm
-	s.sTypeName = storageTypeToString(s.sType)
-	if err := s.initShared(); err != nil {
-		return s, err
+// Only initialize the minimal information we need about a given storage type.
+func (s *storageLvm) StorageCoreInit() (*storageCore, error) {
+	sCore := storageCore{}
+	sCore.sType = storageTypeLvm
+	typeName, err := storageTypeToString(sCore.sType)
+	if err != nil {
+		return nil, err
 	}
+	sCore.sTypeName = typeName
 
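+	// Assemble the version string from the output of "lvm version", joining the per-line components with " / ".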
 	output, err := exec.Command("lvm", "version").CombinedOutput()
 	if err != nil {
@@ -174,35 +175,41 @@ func (s *storageLvm) Init(config map[string]interface{}) (storage, error) {
 	}
 	lines := strings.Split(string(output), "\n")
 
-	s.sTypeVersion = ""
+	sCore.sTypeVersion = ""
 	for idx, line := range lines {
 		fields := strings.SplitAfterN(line, ":", 2)
 		if len(fields) < 2 {
 			continue
 		}
 		if idx > 0 {
-			s.sTypeVersion += " / "
+			sCore.sTypeVersion += " / "
 		}
-		s.sTypeVersion += strings.TrimSpace(fields[1])
+		sCore.sTypeVersion += strings.TrimSpace(fields[1])
 	}
 
-	if config["vgName"] == nil {
-		vgName := daemonConfig["storage.lvm_vg_name"].Get()
-		if vgName == "" {
-			return s, fmt.Errorf("LVM isn't enabled")
-		}
+	err = sCore.initShared()
+	if err != nil {
+		return nil, err
+	}
 
-		if err := storageLVMCheckVolumeGroup(vgName); err != nil {
-			return s, err
-		}
-		s.vgName = vgName
-	} else {
-		s.vgName = config["vgName"].(string)
+	s.storageCore = sCore
+
+	return &sCore, nil
+}
+
+func (s *storageLvm) StoragePoolInit(config map[string]interface{}) (storage, error) {
+	_, err := s.StorageCoreInit()
+	if err != nil {
+		return s, err
 	}
 
 	return s, nil
 }
 
+func (s *storageLvm) StoragePoolCheck() error {
+	return nil
+}
+
 func versionSplit(versionString string) (int, int, int, error) {
 	fs := strings.Split(versionString, ".")
 	majs, mins, incs := fs[0], fs[1], fs[2]
@@ -245,6 +252,54 @@ func (s *storageLvm) lvmVersionIsAtLeast(versionString string) (bool, error) {
 
 }
 
+func (s *storageLvm) StoragePoolCreate() error {
+	return nil
+}
+
+func (s *storageLvm) StoragePoolVolumeCreate() error {
+	return nil
+}
+
+func (s *storageLvm) StoragePoolDelete() error {
+	return nil
+}
+
+func (s *storageLvm) StoragePoolVolumeDelete() error {
+	return nil
+}
+
+func (s *storageLvm) GetStoragePoolWritable() api.StoragePoolPut {
+	return s.pool.Writable()
+}
+
+func (s *storageLvm) GetStoragePoolVolumeWritable() api.StorageVolumePut {
+	return s.volume.Writable()
+}
+
+func (s *storageLvm) SetStoragePoolWritable(writable *api.StoragePoolPut) {
+	s.pool.StoragePoolPut = *writable
+}
+
+func (s *storageLvm) SetStoragePoolVolumeWritable(writable *api.StorageVolumePut) {
+	s.volume.StorageVolumePut = *writable
+}
+
+func (s *storageLvm) ContainerPoolGet() string {
+	return s.pool.PoolName
+}
+
+func (s *storageLvm) ContainerPoolIDGet() int64 {
+	return s.poolID
+}
+
+func (s *storageLvm) StoragePoolUpdate(changedConfig []string) error {
+	return nil
+}
+
+func (s *storageLvm) StoragePoolVolumeUpdate(changedConfig []string) error {
+	return nil
+}
+
 func (s *storageLvm) ContainerCreate(container container) error {
 	containerName := containerNameToLVName(container.Name())
 	lvpath, err := s.createThinLV(containerName)
@@ -428,7 +483,7 @@ func (s *storageLvm) ContainerCopy(container container, sourceContainer containe
 
 func (s *storageLvm) ContainerStart(name string, path string) error {
 	lvName := containerNameToLVName(name)
-	lvpath := fmt.Sprintf("/dev/%s/%s", s.vgName, lvName)
+	lvpath := fmt.Sprintf("/dev/%s/%s", s.pool.PoolName, lvName)
 	fstype := daemonConfig["storage.lvm_fstype"].Get()
 
 	mountOptions := daemonConfig["storage.lvm_mount_options"].Get()
@@ -501,7 +556,7 @@ func (s *storageLvm) ContainerRename(
 		return err
 	}
 
-	err = os.Symlink(fmt.Sprintf("/dev/%s/%s", s.vgName, newName), newSymPath)
+	err = os.Symlink(fmt.Sprintf("/dev/%s/%s", s.pool.PoolName, newName), newSymPath)
 	if err != nil {
 		return err
 	}
@@ -633,7 +688,7 @@ func (s *storageLvm) ContainerSnapshotRename(
 	}
 
 	// Create the symlink
-	err = os.Symlink(fmt.Sprintf("/dev/%s/%s", s.vgName, newName), newSymPath)
+	err = os.Symlink(fmt.Sprintf("/dev/%s/%s", s.pool.PoolName, newName), newSymPath)
 	if err != nil {
 		return fmt.Errorf("Failed to create symlink: %s", err)
 	}
@@ -795,14 +850,14 @@ func (s *storageLvm) createDefaultThinPool() (string, error) {
 			"--poolmetadatasize", "1G",
 			"-l", "100%FREE",
 			"--thinpool",
-			fmt.Sprintf("%s/%s", s.vgName, thinPoolName))
+			fmt.Sprintf("%s/%s", s.pool.PoolName, thinPoolName))
 	} else {
 		output, err = tryExec(
 			"lvcreate",
 			"--poolmetadatasize", "1G",
 			"-L", "1G",
 			"--thinpool",
-			fmt.Sprintf("%s/%s", s.vgName, thinPoolName))
+			fmt.Sprintf("%s/%s", s.pool.PoolName, thinPoolName))
 	}
 
 	if err != nil {
@@ -823,7 +878,7 @@ func (s *storageLvm) createDefaultThinPool() (string, error) {
 			"lvextend",
 			"--alloc", "anywhere",
 			"-l", "100%FREE",
-			fmt.Sprintf("%s/%s", s.vgName, thinPoolName))
+			fmt.Sprintf("%s/%s", s.pool.PoolName, thinPoolName))
 
 		if err != nil {
 			s.log.Error(
@@ -871,13 +926,13 @@ func (s *storageLvm) createThinLV(lvname string) (string, error) {
 		"--thin",
 		"-n", lvname,
 		"--virtualsize", lvSize,
-		fmt.Sprintf("%s/%s", s.vgName, poolname))
+		fmt.Sprintf("%s/%s", s.pool.PoolName, poolname))
 	if err != nil {
 		s.log.Error("Could not create LV", log.Ctx{"lvname": lvname, "output": string(output)})
 		return "", fmt.Errorf("Could not create thin LV named %s", lvname)
 	}
 
-	lvpath := fmt.Sprintf("/dev/%s/%s", s.vgName, lvname)
+	lvpath := fmt.Sprintf("/dev/%s/%s", s.pool.PoolName, lvname)
 
 	fstype := daemonConfig["storage.lvm_fstype"].Get()
 	switch fstype {
@@ -906,7 +961,7 @@ func (s *storageLvm) removeLV(lvname string) error {
 	var output []byte
 
 	output, err = tryExec(
-		"lvremove", "-f", fmt.Sprintf("%s/%s", s.vgName, lvname))
+		"lvremove", "-f", fmt.Sprintf("%s/%s", s.pool.PoolName, lvname))
 
 	if err != nil {
 		s.log.Error("Could not remove LV", log.Ctx{"lvname": lvname, "output": string(output)})
@@ -917,7 +972,7 @@ func (s *storageLvm) removeLV(lvname string) error {
 }
 
 func (s *storageLvm) createSnapshotLV(lvname string, origlvname string, readonly bool) (string, error) {
-	s.log.Debug("in createSnapshotLV:", log.Ctx{"lvname": lvname, "dev string": fmt.Sprintf("/dev/%s/%s", s.vgName, origlvname)})
+	s.log.Debug("in createSnapshotLV:", log.Ctx{"lvname": lvname, "dev string": fmt.Sprintf("/dev/%s/%s", s.pool.PoolName, origlvname)})
 	isRecent, err := s.lvmVersionIsAtLeast("2.02.99")
 	if err != nil {
 		return "", fmt.Errorf("Error checking LVM version: %v", err)
@@ -928,19 +983,19 @@ func (s *storageLvm) createSnapshotLV(lvname string, origlvname string, readonly
 			"lvcreate",
 			"-kn",
 			"-n", lvname,
-			"-s", fmt.Sprintf("/dev/%s/%s", s.vgName, origlvname))
+			"-s", fmt.Sprintf("/dev/%s/%s", s.pool.PoolName, origlvname))
 	} else {
 		output, err = tryExec(
 			"lvcreate",
 			"-n", lvname,
-			"-s", fmt.Sprintf("/dev/%s/%s", s.vgName, origlvname))
+			"-s", fmt.Sprintf("/dev/%s/%s", s.pool.PoolName, origlvname))
 	}
 	if err != nil {
 		s.log.Error("Could not create LV snapshot", log.Ctx{"lvname": lvname, "origlvname": origlvname, "output": string(output)})
 		return "", fmt.Errorf("Could not create snapshot LV named %s", lvname)
 	}
 
-	snapshotFullName := fmt.Sprintf("/dev/%s/%s", s.vgName, lvname)
+	snapshotFullName := fmt.Sprintf("/dev/%s/%s", s.pool.PoolName, lvname)
 
 	if readonly {
 		output, err = tryExec("lvchange", "-ay", "-pr", snapshotFullName)
@@ -960,7 +1015,7 @@ func (s *storageLvm) isLVMContainer(container container) bool {
 }
 
 func (s *storageLvm) renameLV(oldName string, newName string) (string, error) {
-	output, err := tryExec("lvrename", s.vgName, oldName, newName)
+	output, err := tryExec("lvrename", s.pool.PoolName, oldName, newName)
 	return string(output), err
 }
 

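For context on the version gate above: createSnapshotLV only passes -kn when
lvmVersionIsAtLeast("2.02.99") holds, and that check reduces to a numeric
comparison of "maj.min.inc" version strings. A standalone sketch of the
comparison, assuming installed versions may carry a "(...)" suffix that has
to be stripped first:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// versionSplit parses a "maj.min.inc" LVM version string, dropping any
// "(...)" suffix on the last component.
func versionSplit(v string) (int, int, int, error) {
	fs := strings.SplitN(v, ".", 3)
	if len(fs) != 3 {
		return 0, 0, 0, fmt.Errorf("invalid version %q", v)
	}
	fs[2] = strings.TrimSpace(strings.SplitN(fs[2], "(", 2)[0])

	maj, err := strconv.Atoi(fs[0])
	if err != nil {
		return 0, 0, 0, err
	}
	min, err := strconv.Atoi(fs[1])
	if err != nil {
		return 0, 0, 0, err
	}
	inc, err := strconv.Atoi(fs[2])
	if err != nil {
		return 0, 0, 0, err
	}
	return maj, min, inc, nil
}

// versionIsAtLeast reports whether version "have" is >= version "want".
func versionIsAtLeast(have string, want string) (bool, error) {
	hMaj, hMin, hInc, err := versionSplit(have)
	if err != nil {
		return false, err
	}
	wMaj, wMin, wInc, err := versionSplit(want)
	if err != nil {
		return false, err
	}

	if hMaj != wMaj {
		return hMaj > wMaj, nil
	}
	if hMin != wMin {
		return hMin > wMin, nil
	}
	return hInc >= wInc, nil
}

func main() {
	ok, err := versionIsAtLeast("2.02.133(2)", "2.02.99")
	fmt.Println(ok, err) // true <nil>
}
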
From 328bc5fa66cfc8fb369ea32dce20227b437b1b00 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner@ubuntu.com>
Date: Wed, 11 Jan 2017 12:26:38 +0100
Subject: [PATCH 13/63] lxd/storage_zfs: implement new storage functions

Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
---
 lxd/storage_zfs.go | 639 ++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 458 insertions(+), 181 deletions(-)

diff --git a/lxd/storage_zfs.go b/lxd/storage_zfs.go
index 022c42b..3fe22d7 100644
--- a/lxd/storage_zfs.go
+++ b/lxd/storage_zfs.go
@@ -6,6 +6,7 @@ import (
 	"io/ioutil"
 	"os"
 	"os/exec"
+	"path/filepath"
 	"strconv"
 	"strings"
 	"syscall"
@@ -14,81 +15,208 @@ import (
 	"github.com/gorilla/websocket"
 
 	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/api"
 
 	"github.com/pborman/uuid"
 	log "gopkg.in/inconshreveable/log15.v2"
 )
 
 type storageZfs struct {
-	d       *Daemon
-	zfsPool string
-
 	storageShared
 }
 
-func (s *storageZfs) Init(config map[string]interface{}) (storage, error) {
-	s.sType = storageTypeZfs
-	s.sTypeName = storageTypeToString(s.sType)
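+// zfsIsEnabled reports whether the "zfs" userspace tool is available in $PATH.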
+func (s *storageZfs) zfsIsEnabled() bool {
+	out, err := exec.LookPath("zfs")
+	if err != nil || len(out) == 0 {
+		return false
+	}
 
-	err := s.initShared()
+	return true
+}
+
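+// zfsModuleVersionGet reads the version of the loaded ZFS kernel module
+// from /sys/module/zfs/version.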
+func (s *storageZfs) zfsModuleVersionGet() (string, error) {
+	zfsVersion, err := ioutil.ReadFile("/sys/module/zfs/version")
 	if err != nil {
-		return s, err
+		return "", fmt.Errorf("Could not determine ZFS module version.")
 	}
 
-	if config["zfsPool"] == nil {
-		zfsPool := daemonConfig["storage.zfs_pool_name"].Get()
-		if zfsPool == "" {
-			return s, fmt.Errorf("ZFS isn't enabled")
-		}
+	return strings.TrimSpace(string(zfsVersion)), nil
+}
 
-		s.zfsPool = zfsPool
-	} else {
-		s.zfsPool = config["zfsPool"].(string)
+// Only initialize the minimal information we need about a given storage type.
+func (s *storageZfs) StorageCoreInit() (*storageCore, error) {
+	sCore := storageCore{}
+	sCore.sType = storageTypeZfs
+	typeName, err := storageTypeToString(sCore.sType)
+	if err != nil {
+		return nil, err
 	}
+	sCore.sTypeName = typeName
 
-	out, err := exec.LookPath("zfs")
-	if err != nil || len(out) == 0 {
-		return s, fmt.Errorf("The 'zfs' tool isn't available")
+	if !s.zfsIsEnabled() {
+		return nil, fmt.Errorf("The \"zfs\" tool is not available.")
 	}
 
-	err = s.zfsCheckPool(s.zfsPool)
+	sCore.sTypeVersion, err = s.zfsModuleVersionGet()
 	if err != nil {
-		if shared.PathExists(shared.VarPath("zfs.img")) {
-			_ = loadModule("zfs")
+		return nil, err
+	}
 
-			output, err := exec.Command("zpool", "import",
-				"-d", shared.VarPath(), s.zfsPool).CombinedOutput()
-			if err != nil {
-				return s, fmt.Errorf("Unable to import the ZFS pool: %s", output)
+	err = sCore.initShared()
+	if err != nil {
+		return nil, err
+	}
+
+	s.storageCore = sCore
+
+	return &sCore, nil
+}
+
+// Functions dealing with storage pools.
+func (s *storageZfs) StoragePoolInit(config map[string]interface{}) (storage, error) {
+	_, err := s.StorageCoreInit()
+	if err != nil {
+		return s, err
+	}
+
+	if s.pool.PoolName == "" {
+		if config["zfsPool"] == nil {
+			zfsPool := daemonConfig["storage.zfs_pool_name"].Get()
+			if zfsPool == "" {
+				return s, fmt.Errorf("ZFS isn't enabled")
 			}
+
+			s.pool.PoolName = zfsPool
 		} else {
-			return s, err
+			s.pool.PoolName = config["zfsPool"].(string)
 		}
 	}
 
-	output, err := exec.Command("zfs", "get", "version", "-H", "-o", "value", s.zfsPool).CombinedOutput()
+	return s, nil
+}
+
+func (s *storageZfs) StoragePoolCheck() error {
+	err := s.zfsPoolCheck(s.pool.PoolName)
 	if err != nil {
-		return s, fmt.Errorf("The 'zfs' tool isn't working properly")
-	}
+		source := s.pool.PoolConfig["source"]
+		if shared.PathExists(source) {
+			_ = loadModule("zfs")
 
-	count, err := fmt.Sscanf(string(output), "%s\n", &s.sTypeVersion)
-	if err != nil || count != 1 {
-		return s, fmt.Errorf("The 'zfs' tool isn't working properly")
+			output, err := exec.Command("zpool", "import", source, s.pool.PoolName).CombinedOutput()
+			if err != nil {
+				return fmt.Errorf("Unable to import the ZFS pool: %s", output)
+			}
+		} else {
+			return err
+		}
 	}
 
-	output, err = exec.Command("zfs", "get", "mountpoint", "-H", "-o", "source", s.zfsPool).CombinedOutput()
+	output, err := exec.Command("zfs", "get", "mountpoint", "-H", "-o", "source", s.pool.PoolName).CombinedOutput()
 	if err != nil {
-		return s, fmt.Errorf("Unable to query ZFS mountpoint")
+		return fmt.Errorf("Unable to query ZFS mountpoint")
 	}
 
 	if strings.TrimSpace(string(output)) != "local" {
-		err = shared.RunCommand("zfs", "set", "mountpoint=none", s.zfsPool)
+		err = shared.RunCommand("zfs", "set", "mountpoint=none", s.pool.PoolName)
 		if err != nil {
-			return s, err
+			return err
 		}
 	}
 
-	return s, nil
+	return nil
+}
+
+func (s *storageZfs) StoragePoolCreate() error {
+	err := s.zfsPoolCreate()
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (s *storageZfs) StoragePoolVolumeCreate() error {
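+	// Custom volumes live in the "custom/" namespace on the pool; their
+	// mountpoint is the corresponding path under the LXD directory with a
+	// ".zfs" suffix.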
+	fs := fmt.Sprintf("custom/%s", s.volume.VolumeName)
+	fsMountpoint := fmt.Sprintf("%s.zfs", shared.VarPath(fs))
+
+	err := s.zfsPoolVolumeCreate(fs)
+	if err != nil {
+		return err
+	}
+
+	err = s.zfsPoolVolumeSet(fs, "mountpoint", fsMountpoint)
+	if err != nil {
+		return err
+	}
+
+	if !shared.IsMountPoint(fsMountpoint) {
+		s.zfsPoolVolumeMount(fs)
+	}
+
+	return nil
+}
+
+func (s *storageZfs) StoragePoolDelete() error {
+	err := s.zfsPoolDelete()
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (s *storageZfs) StoragePoolVolumeDelete() error {
+	fs := fmt.Sprintf("custom/%s", s.volume.VolumeName)
+	err := s.zfsPoolVolumeDestroy(fs)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (s *storageZfs) GetStoragePoolWritable() api.StoragePoolPut {
+	return s.pool.Writable()
+}
+
+func (s *storageZfs) GetStoragePoolVolumeWritable() api.StorageVolumePut {
+	return s.volume.Writable()
+}
+
+func (s *storageZfs) SetStoragePoolWritable(writable *api.StoragePoolPut) {
+	s.pool.StoragePoolPut = *writable
+}
+
+func (s *storageZfs) SetStoragePoolVolumeWritable(writable *api.StorageVolumePut) {
+	s.volume.StorageVolumePut = *writable
+}
+
+func (s *storageZfs) ContainerPoolGet() string {
+	return s.pool.PoolName
+}
+
+func (s *storageZfs) ContainerPoolIDGet() int64 {
+	return s.poolID
+}
+
+func (s *storageZfs) StoragePoolUpdate(changedConfig []string) error {
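+	// The driver, size, and source of an existing pool cannot be changed.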
+	if shared.StringInSlice("driver", changedConfig) {
+		return fmt.Errorf("You cannot change the driver of a storage pool")
+	}
+
+	if shared.StringInSlice("size", changedConfig) {
+		return fmt.Errorf("You cannot change the size of a storage pool")
+	}
+
+	if shared.StringInSlice("source", changedConfig) {
+		return fmt.Errorf("You cannot change the source of a storage pool")
+	}
+
+	return nil
+}
+
+func (s *storageZfs) StoragePoolVolumeUpdate(changedConfig []string) error {
+	return nil
 }
 
 // Things we don't need to care about
@@ -97,7 +225,7 @@ func (s *storageZfs) ContainerStart(name string, path string) error {
 
 	// Just in case the container filesystem got unmounted
 	if !shared.IsMountPoint(shared.VarPath(fs)) {
-		s.zfsMount(fs)
+		s.zfsPoolVolumeMount(fs)
 	}
 
 	return nil
@@ -111,12 +239,25 @@ func (s *storageZfs) ContainerStop(name string, path string) error {
 func (s *storageZfs) ContainerCreate(container container) error {
 	cPath := container.Path()
 	fs := fmt.Sprintf("containers/%s", container.Name())
+	fsMountpoint := fmt.Sprintf("%s.zfs", shared.VarPath(fs))
 
-	err := s.zfsCreate(fs)
+	// Create volume.
+	err := s.zfsPoolVolumeCreate(fs)
 	if err != nil {
 		return err
 	}
 
+	// Set mountpoint.
+	err = s.zfsPoolVolumeSet(fs, "mountpoint", fsMountpoint)
+	if err != nil {
+		return err
+	}
+
+	// Check if it got automatically mounted.
+	if !shared.IsMountPoint(shared.VarPath(fs)) {
+		s.zfsPoolVolumeMount(fs)
+	}
+
 	err = os.Symlink(cPath+".zfs", cPath)
 	if err != nil {
 		return err
@@ -134,24 +275,20 @@ func (s *storageZfs) ContainerCreate(container container) error {
 		return err
 	}
 
-	return container.TemplateApply("create")
+	err = container.TemplateApply("create")
+	if err != nil {
+		return err
+	}
+
+	return nil
 }
 
 func (s *storageZfs) ContainerCreateFromImage(container container, fingerprint string) error {
 	cPath := container.Path()
-	imagePath := shared.VarPath("images", fingerprint)
-	subvol := fmt.Sprintf("%s.zfs", imagePath)
 	fs := fmt.Sprintf("containers/%s", container.Name())
 	fsImage := fmt.Sprintf("images/%s", fingerprint)
 
-	if !shared.PathExists(subvol) {
-		err := s.ImageCreate(fingerprint)
-		if err != nil {
-			return err
-		}
-	}
-
-	err := s.zfsClone(fsImage, "readonly", fs, true)
+	err := s.zfsPoolVolumeClone(fsImage, "readonly", fs, true)
 	if err != nil {
 		return err
 	}
@@ -179,8 +316,12 @@ func (s *storageZfs) ContainerCreateFromImage(container container, fingerprint s
 			return err
 		}
 	}
+	err = container.TemplateApply("create")
+	if err != nil {
+		return err
+	}
 
-	return container.TemplateApply("create")
+	return nil
 }
 
 func (s *storageZfs) ContainerCanRestore(container container, sourceContainer container) error {
@@ -203,16 +344,16 @@ func (s *storageZfs) ContainerCanRestore(container container, sourceContainer co
 func (s *storageZfs) ContainerDelete(container container) error {
 	fs := fmt.Sprintf("containers/%s", container.Name())
 
-	if s.zfsExists(fs) {
+	if s.zfsPoolVolumeExists(fs) {
 		removable := true
-		snaps, err := s.zfsListSnapshots(fs)
+		snaps, err := s.zfsPoolListSnapshots(fs)
 		if err != nil {
 			return err
 		}
 
 		for _, snap := range snaps {
 			var err error
-			removable, err = s.zfsSnapshotRemovable(fs, snap)
+			removable, err = s.zfsPoolVolumeSnapshotRemovable(fs, snap)
 			if err != nil {
 				return err
 			}
@@ -223,28 +364,28 @@ func (s *storageZfs) ContainerDelete(container container) error {
 		}
 
 		if removable {
-			origin, err := s.zfsGet(fs, "origin")
+			origin, err := s.zfsPoolVolumeGet(fs, "origin")
 			if err != nil {
 				return err
 			}
-			origin = strings.TrimPrefix(origin, fmt.Sprintf("%s/", s.zfsPool))
+			origin = strings.TrimPrefix(origin, fmt.Sprintf("%s/", s.pool.PoolName))
 
-			err = s.zfsDestroy(fs)
+			err = s.zfsPoolVolumeDestroy(fs)
 			if err != nil {
 				return err
 			}
 
-			err = s.zfsCleanup(origin)
+			err = s.zfsPoolVolumeCleanup(origin)
 			if err != nil {
 				return err
 			}
 		} else {
-			err := s.zfsSet(fs, "mountpoint", "none")
+			err := s.zfsPoolVolumeSet(fs, "mountpoint", "none")
 			if err != nil {
 				return err
 			}
 
-			err = s.zfsRename(fs, fmt.Sprintf("deleted/containers/%s", uuid.NewRandom().String()))
+			err = s.zfsPoolVolumeRename(fs, fmt.Sprintf("deleted/containers/%s", uuid.NewRandom().String()))
 			if err != nil {
 				return err
 			}
@@ -265,7 +406,7 @@ func (s *storageZfs) ContainerDelete(container container) error {
 		}
 	}
 
-	s.zfsDestroy(fmt.Sprintf("snapshots/%s", container.Name()))
+	s.zfsPoolVolumeDestroy(fmt.Sprintf("snapshots/%s", container.Name()))
 
 	return nil
 }
@@ -285,23 +426,23 @@ func (s *storageZfs) ContainerCopy(container container, sourceContainer containe
 	}
 
 	if sourceSnap == "" {
-		if s.zfsExists(fmt.Sprintf("containers/%s", sourceName)) {
+		if s.zfsPoolVolumeExists(fmt.Sprintf("containers/%s", sourceName)) {
 			sourceSnap = fmt.Sprintf("copy-%s", uuid.NewRandom().String())
 			sourceFs = fmt.Sprintf("containers/%s", sourceName)
-			err := s.zfsSnapshotCreate(fmt.Sprintf("containers/%s", sourceName), sourceSnap)
+			err := s.zfsPoolVolumeSnapshotCreate(fmt.Sprintf("containers/%s", sourceName), sourceSnap)
 			if err != nil {
 				return err
 			}
 		}
 	} else {
-		if s.zfsExists(fmt.Sprintf("containers/%s@snapshot-%s", sourceName, sourceSnap)) {
+		if s.zfsPoolVolumeExists(fmt.Sprintf("containers/%s@snapshot-%s", sourceName, sourceSnap)) {
 			sourceFs = fmt.Sprintf("containers/%s", sourceName)
 			sourceSnap = fmt.Sprintf("snapshot-%s", sourceSnap)
 		}
 	}
 
 	if sourceFs != "" {
-		err := s.zfsClone(sourceFs, sourceSnap, destFs, true)
+		err := s.zfsPoolVolumeClone(sourceFs, sourceSnap, destFs, true)
 		if err != nil {
 			return err
 		}
@@ -342,25 +483,25 @@ func (s *storageZfs) ContainerRename(container container, newName string) error
 	oldName := container.Name()
 
 	// Unmount the filesystem
-	err := s.zfsUnmount(fmt.Sprintf("containers/%s", oldName))
+	err := s.zfsPoolVolumeUnmount(fmt.Sprintf("containers/%s", oldName))
 	if err != nil {
 		return err
 	}
 
 	// Rename the filesystem
-	err = s.zfsRename(fmt.Sprintf("containers/%s", oldName), fmt.Sprintf("containers/%s", newName))
+	err = s.zfsPoolVolumeRename(fmt.Sprintf("containers/%s", oldName), fmt.Sprintf("containers/%s", newName))
 	if err != nil {
 		return err
 	}
 
 	// Update to the new mountpoint
-	err = s.zfsSet(fmt.Sprintf("containers/%s", newName), "mountpoint", shared.VarPath(fmt.Sprintf("containers/%s.zfs", newName)))
+	err = s.zfsPoolVolumeSet(fmt.Sprintf("containers/%s", newName), "mountpoint", shared.VarPath(fmt.Sprintf("containers/%s.zfs", newName)))
 	if err != nil {
 		return err
 	}
 
 	// In case ZFS didn't mount the filesystem, do it ourselves
-	err = s.zfsMount(fmt.Sprintf("containers/%s", newName))
+	err = s.zfsPoolVolumeMount(fmt.Sprintf("containers/%s", newName))
 	if err != nil {
 		return err
 	}
@@ -419,7 +560,7 @@ func (s *storageZfs) ContainerRestore(container container, sourceContainer conta
 	cName := fields[0]
 	snapName := fmt.Sprintf("snapshot-%s", fields[1])
 
-	err = s.zfsSnapshotRestore(fmt.Sprintf("containers/%s", cName), snapName)
+	err = s.zfsPoolVolumeSnapshotRestore(fmt.Sprintf("containers/%s", cName), snapName)
 	if err != nil {
 		return err
 	}
@@ -438,9 +579,9 @@ func (s *storageZfs) ContainerSetQuota(container container, size int64) error {
 	}
 
 	if size > 0 {
-		err = s.zfsSet(fs, property, fmt.Sprintf("%d", size))
+		err = s.zfsPoolVolumeSet(fs, property, fmt.Sprintf("%d", size))
 	} else {
-		err = s.zfsSet(fs, property, "none")
+		err = s.zfsPoolVolumeSet(fs, property, "none")
 	}
 
 	if err != nil {
@@ -460,7 +601,7 @@ func (s *storageZfs) ContainerGetUsage(container container) (int64, error) {
 		property = "usedbydataset"
 	}
 
-	value, err := s.zfsGet(fs, property)
+	value, err := s.zfsPoolVolumeGet(fs, property)
 	if err != nil {
 		return -1, err
 	}
@@ -478,7 +619,7 @@ func (s *storageZfs) ContainerSnapshotCreate(snapshotContainer container, source
 	cName := fields[0]
 	snapName := fmt.Sprintf("snapshot-%s", fields[1])
 
-	err := s.zfsSnapshotCreate(fmt.Sprintf("containers/%s", cName), snapName)
+	err := s.zfsPoolVolumeSnapshotCreate(fmt.Sprintf("containers/%s", cName), snapName)
 	if err != nil {
 		return err
 	}
@@ -503,15 +644,15 @@ func (s *storageZfs) ContainerSnapshotDelete(snapshotContainer container) error
 	cName := fields[0]
 	snapName := fmt.Sprintf("snapshot-%s", fields[1])
 
-	if s.zfsExists(fmt.Sprintf("containers/%s@%s", cName, snapName)) {
-		removable, err := s.zfsSnapshotRemovable(fmt.Sprintf("containers/%s", cName), snapName)
+	if s.zfsPoolVolumeExists(fmt.Sprintf("containers/%s@%s", cName, snapName)) {
+		removable, err := s.zfsPoolVolumeSnapshotRemovable(fmt.Sprintf("containers/%s", cName), snapName)
 		if removable {
-			err = s.zfsSnapshotDestroy(fmt.Sprintf("containers/%s", cName), snapName)
+			err = s.zfsPoolVolumeSnapshotDestroy(fmt.Sprintf("containers/%s", cName), snapName)
 			if err != nil {
 				return err
 			}
 		} else {
-			err = s.zfsSnapshotRename(fmt.Sprintf("containers/%s", cName), snapName, fmt.Sprintf("copy-%s", uuid.NewRandom().String()))
+			err = s.zfsPoolVolumeSnapshotRename(fmt.Sprintf("containers/%s", cName), snapName, fmt.Sprintf("copy-%s", uuid.NewRandom().String()))
 			if err != nil {
 				return err
 			}
@@ -547,7 +688,7 @@ func (s *storageZfs) ContainerSnapshotRename(snapshotContainer container, newNam
 	newName = fmt.Sprintf("snapshot-%s", newFields[1])
 
 	if oldName != newName {
-		err := s.zfsSnapshotRename(fmt.Sprintf("containers/%s", oldcName), oldName, newName)
+		err := s.zfsPoolVolumeSnapshotRename(fmt.Sprintf("containers/%s", oldcName), oldName, newName)
 		if err != nil {
 			return err
 		}
@@ -593,7 +734,7 @@ func (s *storageZfs) ContainerSnapshotStart(container container) error {
 	sourceSnap := fmt.Sprintf("snapshot-%s", sName)
 	destFs := fmt.Sprintf("snapshots/%s/%s", cName, sName)
 
-	err := s.zfsClone(sourceFs, sourceSnap, destFs, false)
+	err := s.zfsPoolVolumeClone(sourceFs, sourceSnap, destFs, false)
 	if err != nil {
 		return err
 	}
@@ -610,7 +751,7 @@ func (s *storageZfs) ContainerSnapshotStop(container container) error {
 	sName := fields[1]
 	destFs := fmt.Sprintf("snapshots/%s/%s", cName, sName)
 
-	err := s.zfsDestroy(destFs)
+	err := s.zfsPoolVolumeDestroy(destFs)
 	if err != nil {
 		return err
 	}
@@ -626,17 +767,23 @@ func (s *storageZfs) ContainerSnapshotCreateEmpty(snapshotContainer container) e
 }
 
 func (s *storageZfs) ImageCreate(fingerprint string) error {
+	// Create temporary mountpoint directory.
+	tmpImageDir, err := ioutil.TempDir(shared.VarPath("images"), "lxd_images_")
+	if err != nil {
+		return err
+	}
+	defer os.RemoveAll(tmpImageDir)
+
 	imagePath := shared.VarPath("images", fingerprint)
-	subvol := fmt.Sprintf("%s.zfs", imagePath)
 	fs := fmt.Sprintf("images/%s", fingerprint)
 
-	if s.zfsExists(fmt.Sprintf("deleted/%s", fs)) {
-		err := s.zfsRename(fmt.Sprintf("deleted/%s", fs), fs)
+	if s.zfsPoolVolumeExists(fmt.Sprintf("deleted/%s", fs)) {
+		err := s.zfsPoolVolumeRename(fmt.Sprintf("deleted/%s", fs), fs)
 		if err != nil {
 			return err
 		}
 
-		err = s.zfsSet(fs, "mountpoint", subvol)
+		err = s.zfsPoolVolumeSet(fs, "mountpoint", "none")
 		if err != nil {
 			return err
 		}
@@ -644,34 +791,57 @@ func (s *storageZfs) ImageCreate(fingerprint string) error {
 		return nil
 	}
 
-	err := s.zfsCreate(fs)
+	// Create a new storage volume on the storage pool for the image.
+	err = s.zfsPoolVolumeCreate(fs)
+	if err != nil {
+		return err
+	}
+
+	// Set a temporary mountpoint for the image.
+	err = s.zfsPoolVolumeSet(fs, "mountpoint", tmpImageDir)
 	if err != nil {
 		return err
 	}
 
+	// Make sure that the image actually got mounted.
+	if !shared.IsMountPoint(tmpImageDir) {
+		s.zfsPoolVolumeMount(fs)
+	}
+
+	// Register a cleanup function.
 	cleanup := func(err error) error {
-		if zerr := s.zfsDestroy(fs); zerr != nil {
+		if zerr := s.zfsPoolVolumeDestroy(fs); zerr != nil {
 			err = fmt.Errorf("%s  During cleanup: %s", err, zerr)
 		}
-		if shared.PathExists(subvol) {
-			if oserr := os.Remove(subvol); oserr != nil {
-				err = fmt.Errorf("%s  During cleanup: Failed to remove sub-volume %s, %s", err, subvol, oserr)
-			}
-		}
 		return err
 	}
 
-	err = unpackImage(s.d, imagePath, subvol)
+	// Unpack the image into the temporary mountpoint.
+	err = unpackImage(s.d, imagePath, tmpImageDir)
+	if err != nil {
+		return cleanup(err)
+	}
+
+	// Mark the new storage volume for the image as readonly.
+	err = s.zfsPoolVolumeSet(fs, "readonly", "on")
 	if err != nil {
 		return cleanup(err)
 	}
 
-	err = s.zfsSet(fs, "readonly", "on")
+	// Remove the temporary mountpoint from the image storage volume.
+	err = s.zfsPoolVolumeSet(fs, "mountpoint", "none")
 	if err != nil {
 		return cleanup(err)
 	}
 
-	err = s.zfsSnapshotCreate(fs, "readonly")
+	// Make sure that the image actually got unmounted.
+	if shared.IsMountPoint(tmpImageDir) {
+		s.zfsPoolVolumeUnmount(fs)
+	}
+
+	// Create a snapshot of that image on the storage pool which we clone for
+	// container creation.
+	err = s.zfsPoolVolumeSnapshotCreate(fs, "readonly")
 	if err != nil {
 		return cleanup(err)
 	}
@@ -682,24 +852,24 @@ func (s *storageZfs) ImageCreate(fingerprint string) error {
 func (s *storageZfs) ImageDelete(fingerprint string) error {
 	fs := fmt.Sprintf("images/%s", fingerprint)
 
-	if s.zfsExists(fs) {
-		removable, err := s.zfsSnapshotRemovable(fs, "readonly")
+	if s.zfsPoolVolumeExists(fs) {
+		removable, err := s.zfsPoolVolumeSnapshotRemovable(fs, "readonly")
 		if err != nil {
 			return err
 		}
 
 		if removable {
-			err := s.zfsDestroy(fs)
+			err := s.zfsPoolVolumeDestroy(fs)
 			if err != nil {
 				return err
 			}
 		} else {
-			err := s.zfsSet(fs, "mountpoint", "none")
+			err := s.zfsPoolVolumeSet(fs, "mountpoint", "none")
 			if err != nil {
 				return err
 			}
 
-			err = s.zfsRename(fs, fmt.Sprintf("deleted/%s", fs))
+			err = s.zfsPoolVolumeRename(fs, fmt.Sprintf("deleted/%s", fs))
 			if err != nil {
 				return err
 			}
@@ -707,7 +877,7 @@ func (s *storageZfs) ImageDelete(fingerprint string) error {
 	}
 
 	if shared.PathExists(shared.VarPath(fs + ".zfs")) {
-		err := os.Remove(shared.VarPath(fs + ".zfs"))
+		err := os.RemoveAll(shared.VarPath(fs + ".zfs"))
 		if err != nil {
 			return err
 		}
@@ -717,7 +887,7 @@ func (s *storageZfs) ImageDelete(fingerprint string) error {
 }
 
 // Helper functions
-func (s *storageZfs) zfsCheckPool(pool string) error {
+func (s *storageZfs) zfsPoolCheck(pool string) error {
 	output, err := exec.Command(
 		"zfs", "get", "type", "-H", "-o", "value", pool).CombinedOutput()
 	if err != nil {
@@ -732,7 +902,77 @@ func (s *storageZfs) zfsCheckPool(pool string) error {
 	return nil
 }
 
-func (s *storageZfs) zfsClone(source string, name string, dest string, dotZfs bool) error {
+func (s *storageZfs) zfsPoolCreate() error {
+	vdev := s.pool.PoolConfig["source"]
+	if vdev == "" {
+		vdev = filepath.Join(shared.VarPath("disks"), s.pool.PoolName)
+	}
+
+	if !filepath.IsAbs(vdev) {
+		// Probably a zpool or zfs dataset.
+		if err := s.zfsPoolCheck(vdev); err != nil {
+			return err
+		}
+
+		// Confirm that the pool is empty.
+		subvols, err := s.zfsPoolListSubvolumes(vdev)
+		if err != nil {
+			return err
+		}
+
+		if len(subvols) > 0 {
+			return fmt.Errorf("Provided ZFS pool (or dataset) isn't empty")
+		}
+
+		return nil
+	} else {
+		if !shared.IsBlockdevPath(vdev) {
+			vdev = vdev + ".img"
+			s.pool.PoolConfig["source"] = vdev
+			// This is likely a loop file.
+			f, err := os.Create(vdev)
+			if err != nil {
+				return fmt.Errorf("Failed to open %s: %s", vdev, err)
+			}
+
+			err = f.Chmod(0600)
+			if err != nil {
+				return fmt.Errorf("Failed to chmod %s: %s", vdev, err)
+			}
+
+			size, err := strconv.ParseInt(s.pool.PoolConfig["size"], 10, 64)
+			if err != nil {
+				return err
+			}
+			err = f.Truncate(size)
+			if err != nil {
+				return fmt.Errorf("Failed to create sparse file %s: %s", vdev, err)
+			}
+
+			err = f.Close()
+			if err != nil {
+				return fmt.Errorf("Failed to close %s: %s", vdev, err)
+			}
+		}
+	}
+
+	zpoolName := s.pool.PoolConfig["zfs.pool_name"]
+	if zpoolName == "" {
+		zpoolName = s.pool.PoolName
+	}
+
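+	// "-f" forces use of the vdev, "-m none" leaves the new pool unmounted,
+	// and "-O compression=on" enables compression for the datasets on it.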
+	output, err := exec.Command(
+		"zpool",
+		"create", zpoolName, vdev,
+		"-f", "-m", "none", "-O", "compression=on").CombinedOutput()
+	if err != nil {
+		return fmt.Errorf("Failed to create the ZFS pool: %s", output)
+	}
+
+	return nil
+}
+
+func (s *storageZfs) zfsPoolVolumeClone(source string, name string, dest string, dotZfs bool) error {
 	var mountpoint string
 
 	mountpoint = shared.VarPath(dest)
@@ -745,20 +985,20 @@ func (s *storageZfs) zfsClone(source string, name string, dest string, dotZfs bo
 		"clone",
 		"-p",
 		"-o", fmt.Sprintf("mountpoint=%s", mountpoint),
-		fmt.Sprintf("%s/%s@%s", s.zfsPool, source, name),
-		fmt.Sprintf("%s/%s", s.zfsPool, dest)).CombinedOutput()
+		fmt.Sprintf("%s/%s@%s", s.pool.PoolName, source, name),
+		fmt.Sprintf("%s/%s", s.pool.PoolName, dest)).CombinedOutput()
 	if err != nil {
 		s.log.Error("zfs clone failed", log.Ctx{"output": string(output)})
 		return fmt.Errorf("Failed to clone the filesystem: %s", output)
 	}
 
-	subvols, err := s.zfsListSubvolumes(source)
+	subvols, err := s.zfsPoolListSubvolumes(source)
 	if err != nil {
 		return err
 	}
 
 	for _, sub := range subvols {
-		snaps, err := s.zfsListSnapshots(sub)
+		snaps, err := s.zfsPoolListSnapshots(sub)
 		if err != nil {
 			return err
 		}
@@ -778,8 +1018,8 @@ func (s *storageZfs) zfsClone(source string, name string, dest string, dotZfs bo
 			"clone",
 			"-p",
 			"-o", fmt.Sprintf("mountpoint=%s", mountpoint),
-			fmt.Sprintf("%s/%s@%s", s.zfsPool, sub, name),
-			fmt.Sprintf("%s/%s", s.zfsPool, destSubvol)).CombinedOutput()
+			fmt.Sprintf("%s/%s@%s", s.pool.PoolName, sub, name),
+			fmt.Sprintf("%s/%s", s.pool.PoolName, destSubvol)).CombinedOutput()
 		if err != nil {
 			s.log.Error("zfs clone failed", log.Ctx{"output": string(output)})
 			return fmt.Errorf("Failed to clone the sub-volume: %s", output)
@@ -789,13 +1029,12 @@ func (s *storageZfs) zfsClone(source string, name string, dest string, dotZfs bo
 	return nil
 }
 
-func (s *storageZfs) zfsCreate(path string) error {
+func (s *storageZfs) zfsPoolVolumeCreate(path string) error {
 	output, err := exec.Command(
 		"zfs",
 		"create",
 		"-p",
-		"-o", fmt.Sprintf("mountpoint=%s.zfs", shared.VarPath(path)),
-		fmt.Sprintf("%s/%s", s.zfsPool, path)).CombinedOutput()
+		fmt.Sprintf("%s/%s", s.pool.PoolName, path)).CombinedOutput()
 	if err != nil {
 		s.log.Error("zfs create failed", log.Ctx{"output": string(output)})
 		return fmt.Errorf("Failed to create ZFS filesystem: %s", output)
@@ -804,8 +1043,45 @@ func (s *storageZfs) zfsCreate(path string) error {
 	return nil
 }
 
-func (s *storageZfs) zfsDestroy(path string) error {
-	mountpoint, err := s.zfsGet(path, "mountpoint")
+// func (s *storageZfs) zfsPoolVolumeCreate(path string) error {
+// 	output, err := exec.Command(
+// 		"zfs",
+// 		"create",
+// 		"-p",
+// 		"-o", fmt.Sprintf("mountpoint=%s.zfs", shared.VarPath(path)),
+// 		fmt.Sprintf("%s/%s", s.pool.PoolName, path)).CombinedOutput()
+// 	if err != nil {
+// 		s.log.Error("zfs create failed", log.Ctx{"output": string(output)})
+// 		return fmt.Errorf("Failed to create ZFS filesystem: %s", output)
+// 	}
+//
+// 	return nil
+// }
+
+func (s *storageZfs) zfsPoolDelete() error {
+	zpoolName := s.pool.PoolConfig["zfs.pool_name"]
+	if zpoolName == "" {
+		zpoolName = s.pool.PoolName
+	}
+
+	output, err := exec.Command(
+		"zpool",
+		"destroy", zpoolName).CombinedOutput()
+	if err != nil {
+		return fmt.Errorf("Failed to delete the ZFS pool: %s", output)
+	}
+
+	// Cleanup storage
+	vdev := s.pool.PoolConfig["source"]
+	if filepath.IsAbs(vdev) && !shared.IsBlockdevPath(vdev) {
+		os.RemoveAll(vdev)
+	}
+
+	return nil
+}
+
+func (s *storageZfs) zfsPoolVolumeDestroy(path string) error {
+	mountpoint, err := s.zfsPoolVolumeGet(path, "mountpoint")
 	if err != nil {
 		return err
 	}
@@ -823,7 +1099,7 @@ func (s *storageZfs) zfsDestroy(path string) error {
 		"zfs",
 		"destroy",
 		"-r",
-		fmt.Sprintf("%s/%s", s.zfsPool, path))
+		fmt.Sprintf("%s/%s", s.pool.PoolName, path))
 
 	if err != nil {
 		s.log.Error("zfs destroy failed", log.Ctx{"output": string(output)})
@@ -833,10 +1109,10 @@ func (s *storageZfs) zfsDestroy(path string) error {
 	return nil
 }
 
-func (s *storageZfs) zfsCleanup(path string) error {
+func (s *storageZfs) zfsPoolVolumeCleanup(path string) error {
 	if strings.HasPrefix(path, "deleted/") {
 		// Cleanup of filesystems kept for refcount reason
-		removablePath, err := s.zfsSnapshotRemovable(path, "")
+		removablePath, err := s.zfsPoolVolumeSnapshotRemovable(path, "")
 		if err != nil {
 			return err
 		}
@@ -845,40 +1121,40 @@ func (s *storageZfs) zfsCleanup(path string) error {
 		if removablePath {
 			if strings.Contains(path, "@") {
 				// Cleanup snapshots
-				err = s.zfsDestroy(path)
+				err = s.zfsPoolVolumeDestroy(path)
 				if err != nil {
 					return err
 				}
 
 				// Check if the parent can now be deleted
 				subPath := strings.SplitN(path, "@", 2)[0]
-				snaps, err := s.zfsListSnapshots(subPath)
+				snaps, err := s.zfsPoolListSnapshots(subPath)
 				if err != nil {
 					return err
 				}
 
 				if len(snaps) == 0 {
-					err := s.zfsCleanup(subPath)
+					err := s.zfsPoolVolumeCleanup(subPath)
 					if err != nil {
 						return err
 					}
 				}
 			} else {
 				// Cleanup filesystems
-				origin, err := s.zfsGet(path, "origin")
+				origin, err := s.zfsPoolVolumeGet(path, "origin")
 				if err != nil {
 					return err
 				}
-				origin = strings.TrimPrefix(origin, fmt.Sprintf("%s/", s.zfsPool))
+				origin = strings.TrimPrefix(origin, fmt.Sprintf("%s/", s.pool.PoolName))
 
-				err = s.zfsDestroy(path)
+				err = s.zfsPoolVolumeDestroy(path)
 				if err != nil {
 					return err
 				}
 
 				// Attempt to remove its parent
 				if origin != "-" {
-					err := s.zfsCleanup(origin)
+					err := s.zfsPoolVolumeCleanup(origin)
 					if err != nil {
 						return err
 					}
@@ -889,7 +1165,7 @@ func (s *storageZfs) zfsCleanup(path string) error {
 		}
 	} else if strings.HasPrefix(path, "containers") && strings.Contains(path, "@copy-") {
 		// Just remove the copy- snapshot for copies of active containers
-		err := s.zfsDestroy(path)
+		err := s.zfsPoolVolumeDestroy(path)
 		if err != nil {
 			return err
 		}
@@ -898,17 +1174,17 @@ func (s *storageZfs) zfsCleanup(path string) error {
 	return nil
 }
 
-func (s *storageZfs) zfsExists(path string) bool {
-	output, _ := s.zfsGet(path, "name")
+func (s *storageZfs) zfsPoolVolumeExists(path string) bool {
+	output, _ := s.zfsPoolVolumeGet(path, "name")
 
-	if output == fmt.Sprintf("%s/%s", s.zfsPool, path) {
+	if output == fmt.Sprintf("%s/%s", s.pool.PoolName, path) {
 		return true
 	}
 
 	return false
 }
 
-func (s *storageZfs) zfsGet(path string, key string) (string, error) {
+func (s *storageZfs) zfsPoolVolumeGet(path string, key string) (string, error) {
 	output, err := exec.Command(
 		"zfs",
 		"get",
@@ -916,7 +1192,7 @@ func (s *storageZfs) zfsGet(path string, key string) (string, error) {
 		"-p",
 		"-o", "value",
 		key,
-		fmt.Sprintf("%s/%s", s.zfsPool, path)).CombinedOutput()
+		fmt.Sprintf("%s/%s", s.pool.PoolName, path)).CombinedOutput()
 	if err != nil {
 		return string(output), fmt.Errorf("Failed to get ZFS config: %s", output)
 	}
@@ -924,7 +1200,7 @@ func (s *storageZfs) zfsGet(path string, key string) (string, error) {
 	return strings.TrimRight(string(output), "\n"), nil
 }
 
-func (s *storageZfs) zfsRename(source string, dest string) error {
+func (s *storageZfs) zfsPoolVolumeRename(source string, dest string) error {
 	var err error
 	var output []byte
 
@@ -933,8 +1209,8 @@ func (s *storageZfs) zfsRename(source string, dest string) error {
 			"zfs",
 			"rename",
 			"-p",
-			fmt.Sprintf("%s/%s", s.zfsPool, source),
-			fmt.Sprintf("%s/%s", s.zfsPool, dest)).CombinedOutput()
+			fmt.Sprintf("%s/%s", s.pool.PoolName, source),
+			fmt.Sprintf("%s/%s", s.pool.PoolName, dest)).CombinedOutput()
 
 		// Success
 		if err == nil {
@@ -942,7 +1218,7 @@ func (s *storageZfs) zfsRename(source string, dest string) error {
 		}
 
 		// zfs rename can fail because of descendants and yet still complete the rename
-		if !s.zfsExists(source) && s.zfsExists(dest) {
+		if !s.zfsPoolVolumeExists(source) && s.zfsPoolVolumeExists(dest) {
 			return nil
 		}
 
@@ -954,12 +1230,12 @@ func (s *storageZfs) zfsRename(source string, dest string) error {
 	return fmt.Errorf("Failed to rename ZFS filesystem: %s", output)
 }
 
-func (s *storageZfs) zfsSet(path string, key string, value string) error {
+func (s *storageZfs) zfsPoolVolumeSet(path string, key string, value string) error {
 	output, err := exec.Command(
 		"zfs",
 		"set",
 		fmt.Sprintf("%s=%s", key, value),
-		fmt.Sprintf("%s/%s", s.zfsPool, path)).CombinedOutput()
+		fmt.Sprintf("%s/%s", s.pool.PoolName, path)).CombinedOutput()
 	if err != nil {
 		s.log.Error("zfs set failed", log.Ctx{"output": string(output)})
 		return fmt.Errorf("Failed to set ZFS config: %s", output)
@@ -968,12 +1244,12 @@ func (s *storageZfs) zfsSet(path string, key string, value string) error {
 	return nil
 }
 
-func (s *storageZfs) zfsSnapshotCreate(path string, name string) error {
+func (s *storageZfs) zfsPoolVolumeSnapshotCreate(path string, name string) error {
 	output, err := exec.Command(
 		"zfs",
 		"snapshot",
 		"-r",
-		fmt.Sprintf("%s/%s@%s", s.zfsPool, path, name)).CombinedOutput()
+		fmt.Sprintf("%s/%s@%s", s.pool.PoolName, path, name)).CombinedOutput()
 	if err != nil {
 		s.log.Error("zfs snapshot failed", log.Ctx{"output": string(output)})
 		return fmt.Errorf("Failed to create ZFS snapshot: %s", output)
@@ -982,12 +1258,12 @@ func (s *storageZfs) zfsSnapshotCreate(path string, name string) error {
 	return nil
 }
 
-func (s *storageZfs) zfsSnapshotDestroy(path string, name string) error {
+func (s *storageZfs) zfsPoolVolumeSnapshotDestroy(path string, name string) error {
 	output, err := exec.Command(
 		"zfs",
 		"destroy",
 		"-r",
-		fmt.Sprintf("%s/%s@%s", s.zfsPool, path, name)).CombinedOutput()
+		fmt.Sprintf("%s/%s@%s", s.pool.PoolName, path, name)).CombinedOutput()
 	if err != nil {
 		s.log.Error("zfs destroy failed", log.Ctx{"output": string(output)})
 		return fmt.Errorf("Failed to destroy ZFS snapshot: %s", output)
@@ -996,23 +1272,23 @@ func (s *storageZfs) zfsSnapshotDestroy(path string, name string) error {
 	return nil
 }
 
-func (s *storageZfs) zfsSnapshotRestore(path string, name string) error {
+func (s *storageZfs) zfsPoolVolumeSnapshotRestore(path string, name string) error {
 	output, err := tryExec(
 		"zfs",
 		"rollback",
-		fmt.Sprintf("%s/%s@%s", s.zfsPool, path, name))
+		fmt.Sprintf("%s/%s@%s", s.pool.PoolName, path, name))
 	if err != nil {
 		s.log.Error("zfs rollback failed", log.Ctx{"output": string(output)})
 		return fmt.Errorf("Failed to restore ZFS snapshot: %s", output)
 	}
 
-	subvols, err := s.zfsListSubvolumes(path)
+	subvols, err := s.zfsPoolListSubvolumes(path)
 	if err != nil {
 		return err
 	}
 
 	for _, sub := range subvols {
-		snaps, err := s.zfsListSnapshots(sub)
+		snaps, err := s.zfsPoolListSnapshots(sub)
 		if err != nil {
 			return err
 		}
@@ -1024,7 +1300,7 @@ func (s *storageZfs) zfsSnapshotRestore(path string, name string) error {
 		output, err := tryExec(
 			"zfs",
 			"rollback",
-			fmt.Sprintf("%s/%s@%s", s.zfsPool, sub, name))
+			fmt.Sprintf("%s/%s@%s", s.pool.PoolName, sub, name))
 		if err != nil {
 			s.log.Error("zfs rollback failed", log.Ctx{"output": string(output)})
 			return fmt.Errorf("Failed to restore ZFS sub-volume snapshot: %s", output)
@@ -1034,13 +1310,13 @@ func (s *storageZfs) zfsSnapshotRestore(path string, name string) error {
 	return nil
 }
 
-func (s *storageZfs) zfsSnapshotRename(path string, oldName string, newName string) error {
+func (s *storageZfs) zfsPoolVolumeSnapshotRename(path string, oldName string, newName string) error {
 	output, err := exec.Command(
 		"zfs",
 		"rename",
 		"-r",
-		fmt.Sprintf("%s/%s@%s", s.zfsPool, path, oldName),
-		fmt.Sprintf("%s/%s@%s", s.zfsPool, path, newName)).CombinedOutput()
+		fmt.Sprintf("%s/%s@%s", s.pool.PoolName, path, oldName),
+		fmt.Sprintf("%s/%s@%s", s.pool.PoolName, path, newName)).CombinedOutput()
 	if err != nil {
 		s.log.Error("zfs snapshot rename failed", log.Ctx{"output": string(output)})
 		return fmt.Errorf("Failed to rename ZFS snapshot: %s", output)
@@ -1049,11 +1325,11 @@ func (s *storageZfs) zfsSnapshotRename(path string, oldName string, newName stri
 	return nil
 }
 
-func (s *storageZfs) zfsMount(path string) error {
+func (s *storageZfs) zfsPoolVolumeMount(path string) error {
 	output, err := tryExec(
 		"zfs",
 		"mount",
-		fmt.Sprintf("%s/%s", s.zfsPool, path))
+		fmt.Sprintf("%s/%s", s.pool.PoolName, path))
 	if err != nil {
 		s.log.Error("zfs mount failed", log.Ctx{"output": string(output)})
 		return fmt.Errorf("Failed to mount ZFS filesystem: %s", output)
@@ -1062,11 +1338,11 @@ func (s *storageZfs) zfsMount(path string) error {
 	return nil
 }
 
-func (s *storageZfs) zfsUnmount(path string) error {
+func (s *storageZfs) zfsPoolVolumeUnmount(path string) error {
 	output, err := tryExec(
 		"zfs",
 		"unmount",
-		fmt.Sprintf("%s/%s", s.zfsPool, path))
+		fmt.Sprintf("%s/%s", s.pool.PoolName, path))
 	if err != nil {
 		s.log.Error("zfs unmount failed", log.Ctx{"output": string(output)})
 		return fmt.Errorf("Failed to unmount ZFS filesystem: %s", output)
@@ -1075,11 +1351,11 @@ func (s *storageZfs) zfsUnmount(path string) error {
 	return nil
 }
 
-func (s *storageZfs) zfsListSubvolumes(path string) ([]string, error) {
+func (s *storageZfs) zfsPoolListSubvolumes(path string) ([]string, error) {
 	path = strings.TrimRight(path, "/")
-	fullPath := s.zfsPool
+	fullPath := s.pool.PoolName
 	if path != "" {
-		fullPath = fmt.Sprintf("%s/%s", s.zfsPool, path)
+		fullPath = fmt.Sprintf("%s/%s", s.pool.PoolName, path)
 	}
 
 	output, err := exec.Command(
@@ -1104,17 +1380,17 @@ func (s *storageZfs) zfsListSubvolumes(path string) ([]string, error) {
 			continue
 		}
 
-		children = append(children, strings.TrimPrefix(entry, fmt.Sprintf("%s/", s.zfsPool)))
+		children = append(children, strings.TrimPrefix(entry, fmt.Sprintf("%s/", s.pool.PoolName)))
 	}
 
 	return children, nil
 }
 
-func (s *storageZfs) zfsListSnapshots(path string) ([]string, error) {
+func (s *storageZfs) zfsPoolListSnapshots(path string) ([]string, error) {
 	path = strings.TrimRight(path, "/")
-	fullPath := s.zfsPool
+	fullPath := s.pool.PoolName
 	if path != "" {
-		fullPath = fmt.Sprintf("%s/%s", s.zfsPool, path)
+		fullPath = fmt.Sprintf("%s/%s", s.pool.PoolName, path)
 	}
 
 	output, err := exec.Command(
@@ -1147,7 +1423,7 @@ func (s *storageZfs) zfsListSnapshots(path string) ([]string, error) {
 	return children, nil
 }
 
-func (s *storageZfs) zfsSnapshotRemovable(path string, name string) (bool, error) {
+func (s *storageZfs) zfsPoolVolumeSnapshotRemovable(path string, name string) (bool, error) {
 	var snap string
 	if name == "" {
 		snap = path
@@ -1155,7 +1431,7 @@ func (s *storageZfs) zfsSnapshotRemovable(path string, name string) (bool, error
 		snap = fmt.Sprintf("%s@%s", path, name)
 	}
 
-	clones, err := s.zfsGet(snap, "clones")
+	clones, err := s.zfsPoolVolumeGet(snap, "clones")
 	if err != nil {
 		return false, err
 	}
@@ -1167,8 +1443,8 @@ func (s *storageZfs) zfsSnapshotRemovable(path string, name string) (bool, error
 	return false, nil
 }
 
-func (s *storageZfs) zfsGetPoolUsers() ([]string, error) {
-	subvols, err := s.zfsListSubvolumes("")
+func (s *storageZfs) zfsPoolGetUsers() ([]string, error) {
+	subvols, err := s.zfsPoolListSubvolumes("")
 	if err != nil {
 		return []string{}, err
 	}
@@ -1204,6 +1480,7 @@ func (s *storageZfs) zfsGetPoolUsers() ([]string, error) {
 // Global functions
 func storageZFSValidatePoolName(d *Daemon, key string, value string) error {
 	s := storageZfs{}
+	s.pool = &api.StoragePool{}
 
 	// Confirm the backend is working
 	err := s.initShared()
@@ -1213,14 +1490,14 @@ func storageZFSValidatePoolName(d *Daemon, key string, value string) error {
 
 	// Confirm the new pool exists and is compatible
 	if value != "" {
-		err = s.zfsCheckPool(value)
+		err = s.zfsPoolCheck(value)
 		if err != nil {
 			return fmt.Errorf("Invalid ZFS pool: %v", err)
 		}
 
 		// Confirm that the new pool is empty
-		s.zfsPool = value
-		subvols, err := s.zfsListSubvolumes("")
+		s.pool.PoolName = value
+		subvols, err := s.zfsPoolListSubvolumes("")
 		if err != nil {
 			return err
 		}
@@ -1233,9 +1510,9 @@ func storageZFSValidatePoolName(d *Daemon, key string, value string) error {
 	// Confirm the old pool isn't in use anymore
 	oldPoolname := daemonConfig["storage.zfs_pool_name"].Get()
 	if oldPoolname != "" {
-		s.zfsPool = oldPoolname
+		s.pool.PoolName = oldPoolname
 
-		users, err := s.zfsGetPoolUsers()
+		users, err := s.zfsPoolGetUsers()
 		if err != nil {
 			return fmt.Errorf("Error checking if a pool is already in use: %v", err)
 		}
@@ -1263,9 +1540,9 @@ func (s *zfsMigrationSourceDriver) Snapshots() []container {
 
 func (s *zfsMigrationSourceDriver) send(conn *websocket.Conn, zfsName string, zfsParent string, readWrapper func(io.ReadCloser) io.ReadCloser) error {
 	fields := strings.SplitN(s.container.Name(), shared.SnapshotDelimiter, 2)
-	args := []string{"send", fmt.Sprintf("%s/containers/%s@%s", s.zfs.zfsPool, fields[0], zfsName)}
+	args := []string{"send", fmt.Sprintf("%s/containers/%s@%s", s.zfs.pool.PoolName, fields[0], zfsName)}
 	if zfsParent != "" {
-		args = append(args, "-i", fmt.Sprintf("%s/containers/%s@%s", s.zfs.zfsPool, s.container.Name(), zfsParent))
+		args = append(args, "-i", fmt.Sprintf("%s/containers/%s@%s", s.zfs.pool.PoolName, s.container.Name(), zfsParent))
 	}
 
 	cmd := exec.Command("zfs", args...)
@@ -1329,7 +1606,7 @@ func (s *zfsMigrationSourceDriver) SendWhileRunning(conn *websocket.Conn, op *op
 	}
 
 	s.runningSnapName = fmt.Sprintf("migration-send-%s", uuid.NewRandom().String())
-	if err := s.zfs.zfsSnapshotCreate(fmt.Sprintf("containers/%s", s.container.Name()), s.runningSnapName); err != nil {
+	if err := s.zfs.zfsPoolVolumeSnapshotCreate(fmt.Sprintf("containers/%s", s.container.Name()), s.runningSnapName); err != nil {
 		return err
 	}
 
@@ -1343,7 +1620,7 @@ func (s *zfsMigrationSourceDriver) SendWhileRunning(conn *websocket.Conn, op *op
 
 func (s *zfsMigrationSourceDriver) SendAfterCheckpoint(conn *websocket.Conn) error {
 	s.stoppedSnapName = fmt.Sprintf("migration-send-%s", uuid.NewRandom().String())
-	if err := s.zfs.zfsSnapshotCreate(fmt.Sprintf("containers/%s", s.container.Name()), s.stoppedSnapName); err != nil {
+	if err := s.zfs.zfsPoolVolumeSnapshotCreate(fmt.Sprintf("containers/%s", s.container.Name()), s.stoppedSnapName); err != nil {
 		return err
 	}
 
@@ -1356,11 +1633,11 @@ func (s *zfsMigrationSourceDriver) SendAfterCheckpoint(conn *websocket.Conn) err
 
 func (s *zfsMigrationSourceDriver) Cleanup() {
 	if s.stoppedSnapName != "" {
-		s.zfs.zfsSnapshotDestroy(fmt.Sprintf("containers/%s", s.container.Name()), s.stoppedSnapName)
+		s.zfs.zfsPoolVolumeSnapshotDestroy(fmt.Sprintf("containers/%s", s.container.Name()), s.stoppedSnapName)
 	}
 
 	if s.runningSnapName != "" {
-		s.zfs.zfsSnapshotDestroy(fmt.Sprintf("containers/%s", s.container.Name()), s.runningSnapName)
+		s.zfs.zfsPoolVolumeSnapshotDestroy(fmt.Sprintf("containers/%s", s.container.Name()), s.runningSnapName)
 	}
 }
 
@@ -1391,7 +1668,7 @@ func (s *storageZfs) MigrationSource(ct container) (MigrationStorageSourceDriver
 	 * is that we send the oldest to newest snapshot, hopefully saving on
 	 * xfer costs. Then, after all that, we send the container itself.
 	 */
-	snapshots, err := s.zfsListSnapshots(fmt.Sprintf("containers/%s", ct.Name()))
+	snapshots, err := s.zfsPoolListSnapshots(fmt.Sprintf("containers/%s", ct.Name()))
 	if err != nil {
 		return nil, err
 	}
@@ -1421,7 +1698,7 @@ func (s *storageZfs) MigrationSource(ct container) (MigrationStorageSourceDriver
 
 func (s *storageZfs) MigrationSink(live bool, container container, snapshots []*Snapshot, conn *websocket.Conn, srcIdmap *shared.IdmapSet, op *operation) error {
 	zfsRecv := func(zfsName string, writeWrapper func(io.WriteCloser) io.WriteCloser) error {
-		zfsFsName := fmt.Sprintf("%s/%s", s.zfsPool, zfsName)
+		zfsFsName := fmt.Sprintf("%s/%s", s.pool.PoolName, zfsName)
 		args := []string{"receive", "-F", "-u", zfsFsName}
 		cmd := exec.Command("zfs", args...)
 
@@ -1465,7 +1742,7 @@ func (s *storageZfs) MigrationSink(live bool, container container, snapshots []*
 	 * unmounted, so we do this before receiving anything.
 	 */
 	zfsName := fmt.Sprintf("containers/%s", container.Name())
-	err := s.zfsUnmount(zfsName)
+	err := s.zfsPoolVolumeUnmount(zfsName)
 	if err != nil {
 		return err
 	}
@@ -1496,7 +1773,7 @@ func (s *storageZfs) MigrationSink(live bool, container container, snapshots []*
 
 	defer func() {
 		/* clean up our migration-send snapshots that we got from recv. */
-		zfsSnapshots, err := s.zfsListSnapshots(fmt.Sprintf("containers/%s", container.Name()))
+		zfsSnapshots, err := s.zfsPoolListSnapshots(fmt.Sprintf("containers/%s", container.Name()))
 		if err != nil {
 			shared.LogError("failed listing snapshots post migration", log.Ctx{"err": err})
 			return
@@ -1508,7 +1785,7 @@ func (s *storageZfs) MigrationSink(live bool, container container, snapshots []*
 				continue
 			}
 
-			s.zfsSnapshotDestroy(fmt.Sprintf("containers/%s", container.Name()), snap)
+			s.zfsPoolVolumeSnapshotDestroy(fmt.Sprintf("containers/%s", container.Name()), snap)
 		}
 	}()
 
@@ -1531,6 +1808,6 @@ func (s *storageZfs) MigrationSink(live bool, container container, snapshots []*
 	 * but sometimes it doesn't. Let's try to mount, but not complain about
 	 * failure.
 	 */
-	s.zfsMount(zfsName)
+	s.zfsPoolVolumeMount(zfsName)
 	return nil
 }
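
The renamed helpers above all derive ZFS dataset names from the per-pool
PoolName instead of the old daemon-wide zfsPool field. A minimal standalone
sketch of that naming scheme (illustrative only, not code from this patch):

package main

import "fmt"

// Illustrative only: every LXD-managed dataset lives below the configured
// pool, and snapshots use ZFS's "@" suffix, matching the fmt.Sprintf calls
// in the zfsPoolVolume* helpers above.
func datasetName(poolName string, path string) string {
	return fmt.Sprintf("%s/%s", poolName, path)
}

func snapshotName(poolName string, path string, snap string) string {
	return fmt.Sprintf("%s/%s@%s", poolName, path, snap)
}

func main() {
	fmt.Println(datasetName("default", "containers/c1"))           // default/containers/c1
	fmt.Println(snapshotName("default", "containers/c1", "snap0")) // default/containers/c1@snap0
}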

From 567f7ab6489305ba33a054ee4168439373ec4771 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Wed, 11 Jan 2017 12:27:29 +0100
Subject: [PATCH 14/63] lxd/container*: adapt to new storage interface

This allows containers to be created on different storage pools.

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/container.go          | 15 +++++++++++++++
 lxd/container_lxc.go      | 24 +++++++++++++++++++-----
 lxd/container_snapshot.go |  4 ++++
 lxd/containers_get.go     |  6 ++++++
 lxd/containers_post.go    | 44 ++++++++++++++++++++++++++++++++++++++++----
 5 files changed, 84 insertions(+), 9 deletions(-)

diff --git a/lxd/container.go b/lxd/container.go
index 42febae..319361f 100644
--- a/lxd/container.go
+++ b/lxd/container.go
@@ -659,6 +659,21 @@ func containerCreateInternal(d *Daemon, args containerArgs) (container, error) {
 		return nil, err
 	}
 
+	// Get the ID of the storage pool to which the container's storage
+	// volume will be attached.
+	poolID, err := dbStoragePoolGetID(d.db, args.StoragePool)
+	if err != nil {
+		return nil, err
+	}
+
+	// Create a new database entry for the container's storage volume on
+	// the storage pool.
+	var volumeConfig map[string]string
+	_, err = dbStoragePoolVolumeCreate(d.db, args.Name, storagePoolVolumeTypeContainer, poolID, volumeConfig)
+	if err != nil {
+		return nil, err
+	}
+
 	// Wipe any existing log for this container name
 	os.RemoveAll(shared.LogPath(args.Name))
 
diff --git a/lxd/container_lxc.go b/lxd/container_lxc.go
index f524920..409617c 100644
--- a/lxd/container_lxc.go
+++ b/lxd/container_lxc.go
@@ -203,11 +203,16 @@ func containerLXCCreate(d *Daemon, args containerArgs) (container, error) {
 
 	shared.LogInfo("Creating container", ctxMap)
 
-	// No need to detect storage here, its a new container.
-	c.storage = d.Storage
+	// Initialize the container storage.
+	cStorage, err := storagePoolVolumeContainerCreateInit(d, c.storagePool, args.Name)
+	if err != nil {
+		shared.LogError("Failed to initialize container storage", ctxMap)
+		return nil, err
+	}
+	c.storage = cStorage
 
 	// Load the config
-	err := c.init()
+	err = c.init()
 	if err != nil {
 		c.Delete()
 		shared.LogError("Failed creating container", ctxMap)
@@ -357,8 +362,7 @@ func containerLXCLoad(d *Daemon, args containerArgs) (container, error) {
 		storagePool:  args.StoragePool,
 	}
 
-	// Detect the storage backend
-	s, err := storageForFilename(d, shared.VarPath("containers", strings.Split(c.name, "/")[0]))
+	s, err := storagePoolVolumeContainerLoadInit(d, args.Name)
 	if err != nil {
 		return nil, err
 	}
@@ -2658,6 +2662,16 @@ func (c *containerLXC) Delete() error {
 		return err
 	}
 
+	// Get the ID of the storage pool the container is attached to. This
+	// reverse-engineering works because container names are globally
+	// unique.
+	poolID := c.storage.ContainerPoolIDGet()
+	// Remove volume from storage pool.
+	err := dbStoragePoolVolumeDelete(c.daemon.db, c.Name(), storagePoolVolumeTypeContainer, poolID)
+	if err != nil {
+		return err
+	}
+
 	// Update network files
 	networkUpdateStatic(c.daemon, "")
 	for k, m := range c.expandedDevices {
diff --git a/lxd/container_snapshot.go b/lxd/container_snapshot.go
index 473e2a2..d7e02d3 100644
--- a/lxd/container_snapshot.go
+++ b/lxd/container_snapshot.go
@@ -95,6 +95,9 @@ func nextSnapshot(d *Daemon, name string) int {
 
 func containerSnapshotsPost(d *Daemon, r *http.Request) Response {
 	name := mux.Vars(r)["name"]
+	if name == "" {
+		return BadRequest(fmt.Errorf("You must provide the name of the container to snapshot."))
+	}
 
 	/*
 	 * snapshot is a three step operation:
@@ -133,6 +136,7 @@ func containerSnapshotsPost(d *Daemon, r *http.Request) Response {
 			Architecture: c.Architecture(),
 			Devices:      c.LocalDevices(),
 			Stateful:     req.Stateful,
+			StoragePool:  c.Storage().ContainerPoolGet(),
 		}
 
 		_, err := containerCreateAsSnapshot(d, args, c)
diff --git a/lxd/containers_get.go b/lxd/containers_get.go
index 2a10110..55e2ffd 100644
--- a/lxd/containers_get.go
+++ b/lxd/containers_get.go
@@ -54,6 +54,10 @@ func doContainersGet(d *Daemon, recursion bool) (interface{}, error) {
 					Status:     api.Error.String(),
 					StatusCode: api.Error}
 			}
+			// COMMENT(brauner): Get the storage pool the
+			// container's storage volume exists on without loading
+			// the storage interface
+			c.StoragePool, _ = dbContainerPool(d.db, container)
 			resultList = append(resultList, c)
 		}
 	}
@@ -66,6 +70,8 @@ func doContainersGet(d *Daemon, recursion bool) (interface{}, error) {
 }
 
 func doContainerGet(d *Daemon, cname string) (*api.Container, error) {
+	// COMMENT(brauner): Do not initialize a storage interface, we don't
+	// need it here.
 	c, err := containerLoadByName(d, cname)
 	if err != nil {
 		return nil, err
diff --git a/lxd/containers_post.go b/lxd/containers_post.go
index 428dfa6..8d05150 100644
--- a/lxd/containers_post.go
+++ b/lxd/containers_post.go
@@ -88,7 +88,7 @@ func createFromImage(d *Daemon, req *api.ContainersPost) Response {
 		if req.Source.Server != "" {
 			hash, err = d.ImageDownload(
 				op, req.Source.Server, req.Source.Protocol, req.Source.Certificate, req.Source.Secret,
-				hash, true, daemonConfig["images.auto_update_cached"].GetBool())
+				hash, true, daemonConfig["images.auto_update_cached"].GetBool(), req.StoragePool)
 			if err != nil {
 				return err
 			}
@@ -204,16 +204,33 @@ func createFromMigration(d *Daemon, req *api.ContainersPost) Response {
 	 * point and just negotiate it over the migration control
 	 * socket. Anyway, it'll happen later :)
 	 */
-	if err == nil && d.Storage.MigrationType() == MigrationFSType_RSYNC {
-		c, err = containerCreateFromImage(d, args, req.Source.BaseImage)
+	if err != nil {
+		c, err = containerCreateAsEmpty(d, args)
 		if err != nil {
 			return InternalError(err)
 		}
 	} else {
-		c, err = containerCreateAsEmpty(d, args)
+		ps, err := storagePoolInit(d, req.StoragePool)
+		if err != nil {
+			return InternalError(err)
+		}
+
+		err = ps.StoragePoolCheck()
 		if err != nil {
 			return InternalError(err)
 		}
+
+		if ps.MigrationType() == MigrationFSType_RSYNC {
+			c, err = containerCreateFromImage(d, args, req.Source.BaseImage)
+			if err != nil {
+				return InternalError(err)
+			}
+		} else {
+			c, err = containerCreateAsEmpty(d, args)
+			if err != nil {
+				return InternalError(err)
+			}
+		}
 	}
 
 	var cert *x509.Certificate
@@ -373,6 +390,25 @@ func containersPost(d *Daemon, r *http.Request) Response {
 		return BadRequest(err)
 	}
 
+	// If no storage pool is found, error out.
+	pools, err := dbStoragePools(d.db)
+	if err != nil {
+		return InternalError(err)
+	}
+
+	if len(pools) == 0 {
+		return BadRequest(fmt.Errorf("No storage pool found. Please create a new storage pool."))
+	}
+
+	// If we receive no storage pool, check whether a default storage pool
+	// is set. If no default storage pool is set, fall back to the first
+	// pool found.
+	if req.StoragePool == "" {
+		req.StoragePool = daemonConfig["storage.default_pool"].Get()
+		if req.StoragePool == "" {
+			req.StoragePool = pools[0]
+		}
+	}
+
 	if req.Name == "" {
 		cs, err := dbContainersList(d.db, cTypeRegular)
 		if err != nil {
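
The storage pool selection added to containersPost above follows a fixed
precedence: the pool named in the request, then the storage.default_pool
config key, then the first configured pool. A self-contained sketch of that
precedence (hypothetical helper, not part of the patch itself):

package main

import "fmt"

// pickStoragePool mirrors the fallback order used in containersPost above:
// explicit request -> storage.default_pool -> first configured pool.
func pickStoragePool(requested string, defaultPool string, pools []string) (string, error) {
	if requested != "" {
		return requested, nil
	}
	if defaultPool != "" {
		return defaultPool, nil
	}
	if len(pools) > 0 {
		return pools[0], nil
	}
	return "", fmt.Errorf("no storage pool found, please create a new storage pool")
}

func main() {
	pool, err := pickStoragePool("", "", []string{"default", "fast"})
	fmt.Println(pool, err) // default <nil>
}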

From 158ab37550017cb45fe6b0e38a351574a4587eb0 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Wed, 11 Jan 2017 12:31:58 +0100
Subject: [PATCH 15/63] lxd/daemon_config: implement storage.default_pool

This config key allows setting a default storage pool in which storage
volumes (containers, custom volumes, etc.) are created when none is
specified explicitly.

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/daemon_config.go | 14 ++++++++++++++
 lxd/storage_pools.go |  6 ++++++
 2 files changed, 20 insertions(+)

diff --git a/lxd/daemon_config.go b/lxd/daemon_config.go
index 1e5e791..19e0599 100644
--- a/lxd/daemon_config.go
+++ b/lxd/daemon_config.go
@@ -196,6 +196,7 @@ func daemonConfigInit(db *sql.DB) error {
 		"storage.zfs_pool_name":        {valueType: "string", validator: storageZFSValidatePoolName, setter: daemonConfigSetStorage},
 		"storage.zfs_remove_snapshots": {valueType: "bool"},
 		"storage.zfs_use_refquota":     {valueType: "bool"},
+		"storage.default_pool":         {valueType: "string", validator: daemonConfigValidateDefaultPool},
 	}
 
 	// Load the values from the DB
@@ -332,3 +333,16 @@ func daemonConfigValidateCompression(d *Daemon, key string, value string) error
 	_, err := exec.LookPath(value)
 	return err
 }
+
+func daemonConfigValidateDefaultPool(d *Daemon, key string, value string) error {
+	if value == "" {
+		return nil
+	}
+
+	_, err := dbStoragePoolGetID(d.db, value)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/lxd/storage_pools.go b/lxd/storage_pools.go
index fc86f0a..3afc072 100644
--- a/lxd/storage_pools.go
+++ b/lxd/storage_pools.go
@@ -301,6 +301,12 @@ func storagePoolDelete(d *Daemon, r *http.Request) Response {
 		return InternalError(err)
 	}
 
+	// COMMENT(brauner): In case we deleted the default storage pool, unset
+	// the default storage pool.
+	if daemonConfig["storage.default_pool"].Get() == poolName {
+		daemonConfig["storage.default_pool"].Set(d, "")
+	}
+
 	return EmptySyncResponse
 }
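
A short usage sketch for the new key; the interface below merely
approximates the client call used elsewhere in this series (the real
SetServerConfig return type differs):

package initexample

// serverConfigSetter approximates the lxd client used in main_init.go;
// hypothetical, for illustration only.
type serverConfigSetter interface {
	SetServerConfig(key string, value string) (interface{}, error)
}

// configureDefaultPool points the daemon at an existing pool. The
// server-side validator (daemonConfigValidateDefaultPool) rejects names
// that do not resolve to an existing pool, and deleting that pool later
// resets the key to "".
func configureDefaultPool(c serverConfigSetter, pool string) error {
	_, err := c.SetServerConfig("storage.default_pool", pool)
	return err
}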
 

From 5aeda8bd5d9769181718a5d81adec14e5d05def9 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Wed, 11 Jan 2017 12:37:11 +0100
Subject: [PATCH 16/63] lxd/images: adapt to new storage interface

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/daemon_config.go |   1 +
 lxd/daemon_images.go |  64 ++++++++++++--
 lxd/images.go        | 235 +++++++++++++++++++++++++++++++++++++++++----------
 shared/api/image.go  |   5 ++
 4 files changed, 254 insertions(+), 51 deletions(-)

diff --git a/lxd/daemon_config.go b/lxd/daemon_config.go
index 19e0599..adb72ea 100644
--- a/lxd/daemon_config.go
+++ b/lxd/daemon_config.go
@@ -187,6 +187,7 @@ func daemonConfigInit(db *sql.DB) error {
 		"images.auto_update_interval":  {valueType: "int", defaultValue: "6"},
 		"images.compression_algorithm": {valueType: "string", validator: daemonConfigValidateCompression, defaultValue: "gzip"},
 		"images.remote_cache_expiry":   {valueType: "int", defaultValue: "10", trigger: daemonConfigTriggerExpiry},
+		"images.default_storage_pool":  {valueType: "string", validator: daemonConfigValidateDefaultPool},
 
 		"storage.lvm_fstype":           {valueType: "string", defaultValue: "ext4", validValues: []string{"ext4", "xfs"}},
 		"storage.lvm_mount_options":    {valueType: "string", defaultValue: "discard"},
diff --git a/lxd/daemon_images.go b/lxd/daemon_images.go
index 97739ab..1cd4a62 100644
--- a/lxd/daemon_images.go
+++ b/lxd/daemon_images.go
@@ -83,7 +83,7 @@ func imageLoadStreamCache(d *Daemon) error {
 
 // ImageDownload checks if we have that Image Fingerprint else
 // downloads the image from a remote server.
-func (d *Daemon) ImageDownload(op *operation, server string, protocol string, certificate string, secret string, alias string, forContainer bool, autoUpdate bool) (string, error) {
+func (d *Daemon) ImageDownload(op *operation, server string, protocol string, certificate string, secret string, alias string, forContainer bool, autoUpdate bool, storagePool string) (string, error) {
 	var err error
 	var ss *simplestreams.SimpleStreams
 	var ctxMap log.Ctx
@@ -181,9 +181,46 @@ func (d *Daemon) ImageDownload(op *operation, server string, protocol string, ce
 		}
 	}
 
-	if _, _, err := dbImageGet(d.db, fp, false, false); err == nil {
+	// COMMENT(brauner): Check if the image already exists on any storage pool.
+	_, imgInfo, err := dbImageGet(d.db, fp, false, false)
+	if err == nil {
+		// COMMENT(brauner): Set storage pool on which the image is
+		// supposed to exist.
+		imgInfo.StoragePool = storagePool
+
 		shared.LogDebug("Image already exists in the db", log.Ctx{"image": fp})
-		// already have it
+
+		// COMMENT(brauner): Get the ID of the storage pool on which a
+		// storage volume for the image needs to exist.
+		poolID, err := dbStoragePoolGetID(d.db, storagePool)
+		if err != nil {
+			return "", err
+		}
+
+		// COMMENT(brauner): Get the IDs of all storage pools on which a
+		// storage volume for the requested image currently exists.
+		poolIDs, err := dbImageGetPools(d.db, imgInfo.Fingerprint)
+		if err != nil {
+			return "", err
+		}
+
+		// COMMENT(brauner): Check if the image already exists on the
+		// current storage pool.
+		if shared.Int64InSlice(poolID, poolIDs) {
+			shared.LogDebugf("Image already exists on storage pool \"%s\".", storagePool)
+			return fp, nil
+		}
+
+		shared.LogDebugf("Image does not exist on storage pool \"%s\".", storagePool)
+
+		// COMMENT(brauner): Create a storage volume for the image on
+		// the requested pool.
+		err = imageCreateInPool(d, imgInfo)
+		if err != nil {
+			shared.LogDebugf("Failed to create image on storage pool \"%s\": %s.", storagePool, err)
+			return "", err
+		}
+
+		shared.LogDebugf("Created image on storage pool \"%s\".", storagePool)
 		return fp, nil
 	}
 
@@ -245,10 +282,21 @@ func (d *Daemon) ImageDownload(op *operation, server string, protocol string, ce
 	var info api.Image
 	info.Fingerprint = fp
 
+	// Set the storage pool to which the image is supposed to be attached.
+	info.StoragePool = storagePool
+
 	destDir := shared.VarPath("images")
 	destName := filepath.Join(destDir, fp)
 	if shared.PathExists(destName) {
-		d.Storage.ImageDelete(fp)
+		is, err := storagePoolVolumeImageInit(d, info.StoragePool, info.Fingerprint)
+		if err != nil {
+			shared.LogWarnf("Failed to initialize the storage volume for the image: %s.", err)
+		} else {
+			err := is.ImageDelete(info.Fingerprint)
+			if err != nil {
+				shared.LogWarnf("Failed to delete image: %s.", err)
+			}
+		}
 	}
 
 	progress := func(progressInt int64, speedInt int64) {
@@ -323,7 +371,11 @@ func (d *Daemon) ImageDownload(op *operation, server string, protocol string, ce
 		info.Public = false
 		info.AutoUpdate = autoUpdate
 
-		_, err = imageBuildFromInfo(d, *info)
+		// Set the storage pool to which the image is supposed to be
+		// attached.
+		info.StoragePool = storagePool
+
+		_, err = imageBuildFromInfo(d, info)
 		if err != nil {
 			return "", err
 		}
@@ -491,7 +543,7 @@ func (d *Daemon) ImageDownload(op *operation, server string, protocol string, ce
 		info.AutoUpdate = autoUpdate
 	}
 
-	_, err = imageBuildFromInfo(d, info)
+	_, err = imageBuildFromInfo(d, &info)
 	if err != nil {
 		shared.LogError(
 			"Failed to create image",
diff --git a/lxd/images.go b/lxd/images.go
index 10c179b..9aea16b 100644
--- a/lxd/images.go
+++ b/lxd/images.go
@@ -331,7 +331,7 @@ func imgPostRemoteInfo(d *Daemon, req api.ImagesPost, op *operation) error {
 		return fmt.Errorf("must specify one of alias or fingerprint for init from image")
 	}
 
-	hash, err = d.ImageDownload(op, req.Source["server"], req.Source["protocol"], req.Source["certificate"], req.Source["secret"], hash, false, req.AutoUpdate)
+	hash, err = d.ImageDownload(op, req.Source["server"], req.Source["protocol"], req.Source["certificate"], req.Source["secret"], hash, false, req.AutoUpdate, req.StoragePool)
 	if err != nil {
 		return err
 	}
@@ -405,7 +405,7 @@ func imgPostURLInfo(d *Daemon, req api.ImagesPost, op *operation) error {
 	}
 
 	// Import the image
-	hash, err = d.ImageDownload(op, url, "direct", "", "", hash, false, req.AutoUpdate)
+	hash, err = d.ImageDownload(op, url, "direct", "", "", hash, false, req.AutoUpdate, req.StoragePool)
 	if err != nil {
 		return err
 	}
@@ -626,12 +626,74 @@ func getImgPostInfo(d *Daemon, r *http.Request,
 	return info, nil
 }
 
-func imageBuildFromInfo(d *Daemon, info api.Image) (metadata map[string]string, err error) {
-	err = d.Storage.ImageCreate(info.Fingerprint)
+// imageCreateInPool() creates a new storage volume in a given storage pool for
+// the image. No entry in the images database will be created. This implies that
+// imageCreateInPool() should only be called when an image already exists in the
+// database and hence already has a storage volume in at least one storage pool.
+func imageCreateInPool(d *Daemon, info *api.Image) error {
+	if info.StoragePool == "" {
+		info.StoragePool = daemonConfig["images.default_storage_pool"].Get()
+		if info.StoragePool == "" {
+			info.StoragePool = daemonConfig["storage.default_pool"].Get()
+			if info.StoragePool == "" {
+				return fmt.Errorf("You must either set a default storage pool for images or specify one.")
+			}
+		}
+	}
+
+	// Get the ID of the storage pool to which the image will be attached.
+	poolID, err := dbStoragePoolGetID(d.db, info.StoragePool)
+	if err != nil {
+		return err
+	}
+
+	// Create a db entry for the storage volume of the image.
+	var volumeConfig map[string]string
+	_, err = dbStoragePoolVolumeCreate(d.db, info.Fingerprint, storagePoolVolumeTypeImage, poolID, volumeConfig)
+	if err != nil {
+		return err
+	}
+
+	// Define a function which reverts everything. Defer this function
+	// so that it doesn't need to be explicitly called in every failing
+	// return path. Track whether or not we want to undo the changes using a
+	// closure.
+	deleteDbEntry := true
+	defer func() {
+		if deleteDbEntry {
+			dbStoragePoolVolumeDelete(d.db, info.Fingerprint, storagePoolVolumeTypeImage, poolID)
+		}
+	}()
+
+	// Initialize a new storage interface.
+	s, err := storagePoolVolumeImageInit(d, info.StoragePool, info.Fingerprint)
+	if err != nil {
+		return err
+	}
+
+	// Create the storage volume for the image on the requested storage
+	// pool.
+	err = s.ImageCreate(info.Fingerprint)
 	if err != nil {
 		os.Remove(shared.VarPath("images", info.Fingerprint))
 		os.Remove(shared.VarPath("images", info.Fingerprint) + ".rootfs")
+		return err
+	}
+
+	// Success, update the closure to mark that the changes should be kept.
+	deleteDbEntry = false
 
+	return nil
+}
+
+// imageBuildFromInfo() creates a new storage volume on a given storage pool for
+// the requested image. If successful, a new entry will be created in the images
+// database for the requested image. This implies that imageBuildFromInfo()
+// should only be called when the image does not already exist in the images
+// database and no storage volume exists in any storage pool for the image.
+func imageBuildFromInfo(d *Daemon, info *api.Image) (metadata map[string]string, err error) {
+	err = imageCreateInPool(d, info)
+	if err != nil {
 		return metadata, err
 	}
 
@@ -712,6 +774,10 @@ func imagesPost(d *Daemon, r *http.Request) Response {
 	run := func(op *operation) error {
 		var info api.Image
 
+		// Set the storage pool to which the image is supposed to be
+		// attached.
+		info.StoragePool = req.StoragePool
+
 		// Setup the cleanup function
 		defer cleanup(builddir, post)
 
@@ -750,7 +816,7 @@ func imagesPost(d *Daemon, r *http.Request) Response {
 			imagePublishLock.Unlock()
 		}
 
-		metadata, err := imageBuildFromInfo(d, info)
+		metadata, err := imageBuildFromInfo(d, &info)
 		if err != nil {
 			return err
 		}
@@ -878,38 +944,54 @@ func autoUpdateImages(d *Daemon) {
 			continue
 		}
 
-		shared.LogDebug("Processing image", log.Ctx{"fp": fp, "server": source.Server, "protocol": source.Protocol, "alias": source.Alias})
-
-		hash, err := d.ImageDownload(nil, source.Server, source.Protocol, "", "", source.Alias, false, true)
-		if hash == fp {
-			shared.LogDebug("Already up to date", log.Ctx{"fp": fp})
-			continue
-		} else if err != nil {
-			shared.LogError("Failed to update the image", log.Ctx{"err": err, "fp": fp})
-			continue
-		}
-
-		newId, _, err := dbImageGet(d.db, hash, false, true)
+		// Get the IDs of all storage pools on which a storage volume
+		// for the requested image currently exists.
+		poolIDs, err := dbImageGetPools(d.db, fp)
 		if err != nil {
-			shared.LogError("Error loading image", log.Ctx{"err": err, "fp": hash})
 			continue
 		}
 
-		err = dbImageLastAccessUpdate(d.db, hash, info.LastUsedAt)
+		// Translate the IDs to pool names.
+		poolNames, err := dbImageGetPoolNamesFromIDs(d.db, poolIDs)
 		if err != nil {
-			shared.LogError("Error setting last use date", log.Ctx{"err": err, "fp": hash})
 			continue
 		}
 
-		err = dbImageAliasesMove(d.db, id, newId)
-		if err != nil {
-			shared.LogError("Error moving aliases", log.Ctx{"err": err, "fp": hash})
-			continue
-		}
+		shared.LogDebug("Processing image", log.Ctx{"fp": fp, "server": source.Server, "protocol": source.Protocol, "alias": source.Alias})
 
-		err = doDeleteImage(d, fp)
-		if err != nil {
-			shared.LogError("Error deleting image", log.Ctx{"err": err, "fp": fp})
+		// Update the image on each pool where it currently exists.
+		for _, poolName := range poolNames {
+			hash, err := d.ImageDownload(nil, source.Server, source.Protocol, "", "", source.Alias, false, true, poolName)
+			if hash == fp {
+				shared.LogDebug("Already up to date", log.Ctx{"fp": fp})
+				continue
+			} else if err != nil {
+				shared.LogError("Failed to update the image", log.Ctx{"err": err, "fp": fp})
+				continue
+			}
+
+			newId, _, err := dbImageGet(d.db, hash, false, true)
+			if err != nil {
+				shared.LogError("Error loading image", log.Ctx{"err": err, "fp": hash})
+				continue
+			}
+
+			err = dbImageLastAccessUpdate(d.db, hash, info.LastUsedAt)
+			if err != nil {
+				shared.LogError("Error setting last use date", log.Ctx{"err": err, "fp": hash})
+				continue
+			}
+
+			err = dbImageAliasesMove(d.db, id, newId)
+			if err != nil {
+				shared.LogError("Error moving aliases", log.Ctx{"err": err, "fp": hash})
+				continue
+			}
+
+			err = doDeleteImage(d, fp, poolName)
+			if err != nil {
+				shared.LogError("Error deleting image", log.Ctx{"err": err, "fp": fp})
+			}
 		}
 	}
 
@@ -919,7 +1001,7 @@ func autoUpdateImages(d *Daemon) {
 func pruneExpiredImages(d *Daemon) {
 	shared.LogInfof("Pruning expired images")
 
-	// Get the list of expires images
+	// Get the list of expired images.
 	expiry := daemonConfig["images.remote_cache_expiry"].GetInt64()
 	images, err := dbImagesGetExpired(d.db, expiry)
 	if err != nil {
@@ -929,33 +1011,86 @@ func pruneExpiredImages(d *Daemon) {
 
 	// Delete them
 	for _, fp := range images {
-		if err := doDeleteImage(d, fp); err != nil {
-			shared.LogError("Error deleting image", log.Ctx{"err": err, "fp": fp})
+		// Get the IDs of all storage pools on which a storage volume
+		// for the requested image currently exists.
+		poolIDs, err := dbImageGetPools(d.db, fp)
+		if err != nil {
+			continue
+		}
+
+		// Translate the IDs to pool names.
+		poolNames, err := dbImageGetPoolNamesFromIDs(d.db, poolIDs)
+		if err != nil {
+			continue
+		}
+
+		for _, poolName := range poolNames {
+			err := doDeleteImage(d, fp, poolName)
+			if err != nil {
+				shared.LogError("Error deleting image", log.Ctx{"err": err, "fp": fp})
+			}
 		}
 	}
 
 	shared.LogInfof("Done pruning expired images")
 }
 
-func doDeleteImage(d *Daemon, fingerprint string) error {
+func doDeleteImage(d *Daemon, fingerprint string, storagePool string) error {
+	// Use the fingerprint we received in a LIKE query and use the full
+	// fingerprint we receive from the database in all further queries.
 	id, imgInfo, err := dbImageGet(d.db, fingerprint, false, false)
 	if err != nil {
 		return err
 	}
 
-	// get storage before deleting images/$fp because we need to
-	// look at the path
-	s, err := storageForImage(d, imgInfo)
+	// Retrieve the IDs of all the storage pools on which a storage volume
+	// for the requested image exists. We do this here because at this point
+	// we know there must be at least one storage volume on a storage pool
+	// for the requested image since there is still a database entry for
+	// that image. So we are guaranteed that len(poolIDs) >= 1. After a
+	// successful storage volume delete from a given pool we can
+	// then simply check whether len(poolIDs) == 1 and only in this case do
+	// we delete the corresponding entry for the image from the database.
+	poolIDs, err := dbImageGetPools(d.db, imgInfo.Fingerprint)
 	if err != nil {
-		shared.LogError("error detecting image storage backend", log.Ctx{"fingerprint": imgInfo.Fingerprint, "err": err})
-	} else {
-		// Remove the image from storage backend
-		if err = s.ImageDelete(imgInfo.Fingerprint); err != nil {
-			shared.LogError("error deleting the image from storage backend", log.Ctx{"fingerprint": imgInfo.Fingerprint, "err": err})
-		}
+		return err
 	}
 
-	// Remove main image file
+	// Initialize a new storage interface.
+	s, err := storagePoolVolumeImageInit(d, storagePool, imgInfo.Fingerprint)
+	if err != nil {
+		return err
+	}
+
+	// Delete the storage volume for the image from the storage pool.
+	err = s.ImageDelete(imgInfo.Fingerprint)
+	if err != nil {
+		return err
+	}
+
+	// Get the ID of the storage pool the storage volume of the image is
+	// attached to.
+	poolID, err := dbStoragePoolGetID(d.db, storagePool)
+	if err != nil {
+		return err
+	}
+
+	// Delete the database entry for the image's storage volume.
+	err = dbStoragePoolVolumeDelete(d.db, imgInfo.Fingerprint, storagePoolVolumeTypeImage, poolID)
+	if err != nil {
+		return err
+	}
+
+	// If the original number of pools on which that image existed is
+	// greater than 1 we know that there is at least one more pool on which
+	// a storage volume for this image exists. So don't delete the
+	// compressed image files and don't delete the image from the image
+	// database.
+	if len(poolIDs) > 1 {
+		return nil
+	}
+
+	// Remove main image file.
 	fname := shared.VarPath("images", imgInfo.Fingerprint)
 	if shared.PathExists(fname) {
 		err = os.Remove(fname)
@@ -964,7 +1099,7 @@ func doDeleteImage(d *Daemon, fingerprint string) error {
 		}
 	}
 
-	// Remove the rootfs file
+	// Remove the rootfs file for the image.
 	fname = shared.VarPath("images", imgInfo.Fingerprint) + ".rootfs"
 	if shared.PathExists(fname) {
 		err = os.Remove(fname)
@@ -973,7 +1108,7 @@ func doDeleteImage(d *Daemon, fingerprint string) error {
 		}
 	}
 
-	// Remove the DB entry
+	// Remove the database entry for the image.
 	if err = dbImageDelete(d.db, id); err != nil {
 		return err
 	}
@@ -983,9 +1118,19 @@ func doDeleteImage(d *Daemon, fingerprint string) error {
 
 func imageDelete(d *Daemon, r *http.Request) Response {
 	fingerprint := mux.Vars(r)["fingerprint"]
+	storagePool := mux.Vars(r)["storage_pool"]
+	if storagePool == "" {
+		storagePool = daemonConfig["images.default_storage_pool"].Get()
+		if storagePool == "" {
+			storagePool = daemonConfig["storage.default_pool"].Get()
+		}
+		if storagePool == "" {
+			return BadRequest(fmt.Errorf("You must provide the name of the storage pool from which to delete the image."))
+		}
+	}
 
 	rmimg := func(op *operation) error {
-		return doDeleteImage(d, fingerprint)
+		return doDeleteImage(d, fingerprint, storagePool)
 	}
 
 	resources := map[string][]string{}
diff --git a/shared/api/image.go b/shared/api/image.go
index e22667c..73b017b 100644
--- a/shared/api/image.go
+++ b/shared/api/image.go
@@ -13,6 +13,9 @@ type ImagesPost struct {
 
 	// API extension: image_compression_algorithm
 	CompressionAlgorithm string `json:"compression_algorithm"`
+
+	// API extension: storage
+	StoragePool string `json:"storage_pool"`
 }
 
 // ImagePut represents the modifiable fields of a LXD image
@@ -20,6 +23,8 @@ type ImagePut struct {
 	AutoUpdate bool              `json:"auto_update"`
 	Properties map[string]string `json:"properties"`
 	Public     bool              `json:"public"`
+
+	StoragePool string `json:"storage_pool"`
 }
 
 // Image represents a LXD image
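
The deletion path above effectively reference-counts an image by the number
of pools that still hold a storage volume for it: per-pool volumes go first,
and the compressed image files plus the images database entry are only
removed with the last volume. A condensed, self-contained sketch of that
invariant (illustrative, with a plain map standing in for the database):

package main

import "fmt"

// deleteImageOnPool sketches the refcount rule from doDeleteImage above:
// per-pool volumes are removed first, and the image files plus database
// entry only disappear with the last volume. poolVolumes is a plain map
// standing in for the storage_volumes table.
func deleteImageOnPool(poolVolumes map[string][]string, fingerprint string, pool string) {
	remaining := []string{}
	for _, p := range poolVolumes[fingerprint] {
		if p != pool {
			remaining = append(remaining, p)
		}
	}
	poolVolumes[fingerprint] = remaining
	if len(remaining) > 0 {
		fmt.Println("kept image files; still used by pools:", remaining)
		return
	}
	delete(poolVolumes, fingerprint)
	fmt.Println("last volume gone; removing image files and db entry")
}

func main() {
	vols := map[string][]string{"abc123": {"pool1", "pool2"}}
	deleteImageOnPool(vols, "abc123", "pool1")
	deleteImageOnPool(vols, "abc123", "pool2")
}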

From 7e0a80eba99edff9ea473fdf226f91ec73d6689a Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Wed, 11 Jan 2017 12:41:44 +0100
Subject: [PATCH 17/63] lxd/daemon: report all storage types in use

Report what storage types are currently in use on a given LXD instance and
correctly report the version of the tools used to create these storage types.

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/api_1.0.go          | 67 +++++++++++++++++++++++++++++++++++++++++++++++--
 lxd/db_storage_pools.go | 32 ++++++++++++++++++++++-
 2 files changed, 96 insertions(+), 3 deletions(-)

diff --git a/lxd/api_1.0.go b/lxd/api_1.0.go
index 9b42a53..ae1932b 100644
--- a/lxd/api_1.0.go
+++ b/lxd/api_1.0.go
@@ -6,6 +6,8 @@ import (
 	"net/http"
 	"os"
 	"reflect"
+	"sync"
+	"sync/atomic"
 	"syscall"
 
 	"gopkg.in/lxc/go-lxc.v2"
@@ -16,6 +18,15 @@ import (
 	"github.com/lxc/lxd/shared/version"
 )
 
+var storagePoolDriversCache []string
+var storagePoolDriversCacheInitialized bool
+var storagePoolDriversCacheVal atomic.Value
+var storagePoolDriversCacheMu sync.Mutex
+
+func readStoragePoolDriversCache() []string {
+	return storagePoolDriversCacheVal.Load().([]string)
+}
+
 var api10 = []Command{
 	containersCmd,
 	containerCmd,
@@ -169,12 +180,64 @@ func api10Get(d *Daemon, r *http.Request) Response {
 		Kernel:             kernel,
 		KernelArchitecture: kernelArchitecture,
 		KernelVersion:      kernelVersion,
-		Storage:            d.Storage.GetStorageTypeName(),
-		StorageVersion:     d.Storage.GetStorageTypeVersion(),
 		Server:             "lxd",
 		ServerPid:          os.Getpid(),
 		ServerVersion:      version.Version}
 
+	// COMMENT(brauner): Get a list of all storage drivers currently in use
+	// on this LXD instance. Only do this once to avoid unnecessarily
+	// querying the db. All subsequent updates of the cache will be done
+	// when we create or delete storage pools in the db. Since this is a
+	// rare event, this cache implementation is a classic frequent-read,
+	// rare-update case, so copy-on-write semantics without locking in the
+	// read case seem appropriate. (Should be cheaper than querying the db
+	// all the time, especially if we keep adding more storage drivers.)
+	if !storagePoolDriversCacheInitialized {
+		tmp, err := dbStoragePoolsGetDrivers(d.db)
+		if err != nil && err != NoSuchObjectError {
+			return InternalError(err)
+		}
+
+		storagePoolDriversCacheMu.Lock()
+		storagePoolDriversCacheVal.Store(tmp)
+		storagePoolDriversCacheMu.Unlock()
+
+		storagePoolDriversCacheInitialized = true
+	}
+
+	drivers := readStoragePoolDriversCache()
+	// COMMENT(brauner): Currently no storage pools are configured/no
+	// drivers are in use.
+	nDrivers := len(drivers)
+	if nDrivers == 0 {
+		env.Storage = ""
+		env.StorageVersion = ""
+	}
+
+	for i := 0; i < nDrivers; i++ {
+		// COMMENT(brauner): Initialize a core storage interface for the
+		// given driver.
+		sCore, err := storagePoolCoreInit(drivers[i])
+		if err != nil {
+			continue
+		}
+
+		if env.Storage != "" {
+			env.Storage = env.Storage + " | " + drivers[i]
+		} else {
+			env.Storage = drivers[i]
+		}
+
+		// Get the version of the storage drivers in use.
+		sVersion := sCore.GetStorageTypeVersion()
+		if env.StorageVersion != "" {
+			env.StorageVersion = env.StorageVersion + " | " + sVersion
+		} else {
+			env.StorageVersion = sVersion
+		}
+	}
+
 	fullSrv := api.Server{ServerUntrusted: srv}
 	fullSrv.Environment = env
 	fullSrv.Config = daemonConfigRender()
diff --git a/lxd/db_storage_pools.go b/lxd/db_storage_pools.go
index 69daf3e..e11d207 100644
--- a/lxd/db_storage_pools.go
+++ b/lxd/db_storage_pools.go
@@ -5,6 +5,7 @@ import (
 
 	_ "github.com/mattn/go-sqlite3"
 
+	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
 )
 
@@ -174,6 +175,18 @@ func dbStoragePoolCreate(db *sql.DB, poolName string, poolDriver string, poolCon
 		return -1, err
 	}
 
+	// COMMENT(brauner): Update the storage drivers cache in api_1.0.go.
+	storagePoolDriversCacheMu.Lock()
+	tmp := storagePoolDriversCacheVal.Load()
+	if tmp != nil {
+		typedTmp := tmp.([]string)
+		if !shared.StringInSlice(poolDriver, typedTmp) {
+			typedTmp = append(typedTmp, poolDriver)
+		}
+		storagePoolDriversCacheVal.Store(typedTmp)
+	}
+	storagePoolDriversCacheMu.Unlock()
+
 	return id, nil
 }
 
@@ -236,7 +249,7 @@ func dbStoragePoolConfigClear(tx *sql.Tx, poolID int64) error {
 
 // Delete storage pool.
 func dbStoragePoolDelete(db *sql.DB, poolName string) error {
-	poolID, _, err := dbStoragePoolGet(db, poolName)
+	poolID, pool, err := dbStoragePoolGet(db, poolName)
 	if err != nil {
 		return err
 	}
@@ -258,6 +271,23 @@ func dbStoragePoolDelete(db *sql.DB, poolName string) error {
 		return err
 	}
 
+	// COMMENT(brauner): Update the storage drivers cache in api_1.0.go.
+	storagePoolDriversCacheMu.Lock()
+	tmp := storagePoolDriversCacheVal.Load()
+	if tmp != nil {
+		typedTmp := tmp.([]string)
+		for i := 0; i < len(typedTmp); i++ {
+			if typedTmp[i] == pool.PoolDriver {
+				typedTmp[i] = typedTmp[len(typedTmp)-1]
+				typedTmp[len(typedTmp)-1] = ""
+				typedTmp = typedTmp[:len(typedTmp)-1]
+				break
+			}
+		}
+		storagePoolDriversCacheVal.Store(typedTmp)
+	}
+	storagePoolDriversCacheMu.Unlock()
+
 	return txCommit(tx)
 }
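
The cache described in the COMMENT above is the standard read-mostly
pattern: readers do a lock-free atomic.Value load, writers serialize on a
mutex and publish a fresh slice. A self-contained sketch (not the daemon
code); note the explicit copy before append, since mutating the stored
slice in place would race with concurrent readers:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

var (
	driversMu  sync.Mutex   // serializes writers only
	driversVal atomic.Value // holds a []string; readers never lock
)

func readDrivers() []string {
	v := driversVal.Load()
	if v == nil {
		return nil
	}
	return v.([]string)
}

// addDriver copies the current slice before mutating so concurrent readers
// keep seeing a consistent snapshot (copy-on-write).
func addDriver(name string) {
	driversMu.Lock()
	defer driversMu.Unlock()
	old := readDrivers()
	updated := make([]string, len(old), len(old)+1)
	copy(updated, old)
	updated = append(updated, name)
	driversVal.Store(updated)
}

func main() {
	addDriver("zfs")
	addDriver("dir")
	fmt.Println(readDrivers()) // [zfs dir]
}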
 

From a71bd953c6523b922b11fac6456f3b2edae8c642 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Wed, 11 Jan 2017 12:48:10 +0100
Subject: [PATCH 18/63] lxd/init: use storage api to create initial pool

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/main_init.go | 97 ++++++++++++++++++++++++++------------------------------
 1 file changed, 45 insertions(+), 52 deletions(-)

diff --git a/lxd/main_init.go b/lxd/main_init.go
index 36b0114..764cdc5 100644
--- a/lxd/main_init.go
+++ b/lxd/main_init.go
@@ -18,6 +18,7 @@ import (
 
 func cmdInit() error {
 	var defaultPrivileged int // controls whether we set security.privileged=true
+	var storageSetup bool     // whether to configure a new storage pool
 	var storageBackend string // dir or zfs
 	var storageMode string    // existing, loop or device
 	var storageLoopSize int64 // Size in GB
@@ -242,19 +243,22 @@ func cmdInit() error {
 			defaultStorage = "zfs"
 		}
 
-		storageBackend = askChoice(fmt.Sprintf("Name of the storage backend to use (dir or zfs) [default=%s]: ", defaultStorage), backendsSupported, defaultStorage)
+		storageSetup = askBool("Do you want to configure a new storage pool (yes/no) [default=yes]? ", "yes")
+		if storageSetup {
+			storagePool = askString("Name of the new storage pool [default=default]: ", "default", nil)
+			storageBackend = askChoice(fmt.Sprintf("Name of the storage backend to use (dir or zfs) [default=%s]: ", defaultStorage), backendsSupported, defaultStorage)
 
-		if !shared.StringInSlice(storageBackend, backendsSupported) {
-			return fmt.Errorf("The requested backend '%s' isn't supported by lxd init.", storageBackend)
-		}
+			if !shared.StringInSlice(storageBackend, backendsSupported) {
+				return fmt.Errorf("The requested backend '%s' isn't supported by lxd init.", storageBackend)
+			}
 
-		if !shared.StringInSlice(storageBackend, backendsAvailable) {
-			return fmt.Errorf("The requested backend '%s' isn't available on your system (missing tools).", storageBackend)
+			if !shared.StringInSlice(storageBackend, backendsAvailable) {
+				return fmt.Errorf("The requested backend '%s' isn't available on your system (missing tools).", storageBackend)
+			}
 		}
 
-		if storageBackend == "zfs" {
+		if storageSetup && storageBackend == "zfs" {
 			if askBool("Create a new ZFS pool (yes/no) [default=yes]? ", "yes") {
-				storagePool = askString("Name of the new ZFS pool [default=lxd]: ", "lxd", nil)
 				if askBool("Would you like to use an existing block device (yes/no) [default=no]? ", "no") {
 					deviceExists := func(path string) error {
 						if !shared.IsBlockdevPath(path) {
@@ -280,7 +284,7 @@ func cmdInit() error {
 						def = 15
 					}
 
-					q := fmt.Sprintf("Size in GB of the new loop device (1GB minimum) [default=%d]: ", def)
+					q := fmt.Sprintf("Size in GB of the new loop device (1GB minimum) [default=%dGB]: ", def)
 					storageLoopSize = askInt(q, 1, -1, fmt.Sprintf("%d", def))
 					storageMode = "loop"
 				}
@@ -359,62 +363,51 @@ they otherwise would.
 		}
 	}
 
-	if !shared.StringInSlice(storageBackend, []string{"dir", "zfs"}) {
-		return fmt.Errorf("Invalid storage backend: %s", storageBackend)
-	}
-
-	// Unset all storage keys, core.https_address and core.trust_password
-	for _, key := range []string{"storage.zfs_pool_name", "core.https_address", "core.trust_password"} {
-		_, err = c.SetServerConfig(key, "")
-		if err != nil {
-			return err
+	if storageSetup {
+		if !shared.StringInSlice(storageBackend, []string{"dir", "zfs"}) {
+			return fmt.Errorf("Invalid storage backend: %s", storageBackend)
 		}
-	}
-
-	// Destroy any existing loop device
-	for _, file := range []string{"zfs.img"} {
-		os.Remove(shared.VarPath(file))
-	}
-
-	if storageBackend == "zfs" {
-		if storageMode == "loop" {
-			storageDevice = shared.VarPath("zfs.img")
-			f, err := os.Create(storageDevice)
-			if err != nil {
-				return fmt.Errorf("Failed to open %s: %s", storageDevice, err)
-			}
 
-			err = f.Chmod(0600)
+		// Unset all storage keys, core.https_address and core.trust_password
+		for _, key := range []string{"storage.zfs_pool_name", "core.https_address", "core.trust_password"} {
+			_, err = c.SetServerConfig(key, "")
 			if err != nil {
-				return fmt.Errorf("Failed to chmod %s: %s", storageDevice, err)
+				return err
 			}
+		}
 
-			err = f.Truncate(int64(storageLoopSize * 1024 * 1024 * 1024))
-			if err != nil {
-				return fmt.Errorf("Failed to create sparse file %s: %s", storageDevice, err)
-			}
+		// Destroy any existing loop device
+		for _, file := range []string{"zfs.img"} {
+			os.Remove(shared.VarPath(file))
+		}
 
-			err = f.Close()
-			if err != nil {
-				return fmt.Errorf("Failed to close %s: %s", storageDevice, err)
-			}
+		storageConfig := map[string]string{}
+		storageConfig["source"] = storageDevice
+		if storageBackend != "dir" {
+			storageConfig["size"] = strconv.FormatInt(storageLoopSize, 10) + "GB"
+		}
+		// TODO(brauner): handle storage mode
+		if storageMode == "" {
 		}
 
-		if shared.StringInSlice(storageMode, []string{"loop", "device"}) {
-			output, err := exec.Command(
-				"zpool",
-				"create", storagePool, storageDevice,
-				"-f", "-m", "none", "-O", "compression=on").CombinedOutput()
-			if err != nil {
-				return fmt.Errorf("Failed to create the ZFS pool: %s", output)
-			}
+		// Create the requested storage pool.
+		err := c.StoragePoolCreate(storagePool, storageBackend, storageConfig)
+		if err != nil {
+			return err
 		}
 
-		// Configure LXD to use the pool
-		_, err = c.SetServerConfig("storage.zfs_pool_name", storagePool)
+		// Mark the storage pool as the default pool.
+		_, err = c.SetServerConfig("storage.default_pool", storagePool)
 		if err != nil {
 			return err
 		}
+
+		if storageBackend == "zfs" {
+			_, err = c.SetServerConfig("storage.zfs_pool_name", storagePool)
+			if err != nil {
+				return err
+			}
+		}
 	}
 
 	if defaultPrivileged == 0 {
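
Condensed, the reworked init flow replaces the direct zpool invocation with
two client calls. A sketch under stated assumptions (the interface below
only approximates the real client signatures; pool name, device, and size
are placeholders):

package initexample

// storageClient approximates the two client calls the reworked lxd init
// relies on; hypothetical, for illustration only.
type storageClient interface {
	StoragePoolCreate(name string, driver string, config map[string]string) error
	SetServerConfig(key string, value string) (interface{}, error)
}

// setupDefaultZfsPool sketches the new sequence: create the pool through
// the storage API, then mark it as the daemon-wide default.
func setupDefaultZfsPool(c storageClient, device string) error {
	config := map[string]string{
		"source": device, // block device or loop file
		"size":   "15GB",
	}
	if err := c.StoragePoolCreate("default", "zfs", config); err != nil {
		return err
	}
	_, err := c.SetServerConfig("storage.default_pool", "default")
	return err
}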

From 50f73cd18908cd976c9c933bf9e2b1c73d88a48c Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Wed, 11 Jan 2017 14:13:24 +0100
Subject: [PATCH 19/63] lxd/daemon: check all storage pools on startup

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/daemon.go | 40 +++++++++++++++++-----------------------
 1 file changed, 17 insertions(+), 23 deletions(-)

diff --git a/lxd/daemon.go b/lxd/daemon.go
index 6bc6c90..adf65a4 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -353,37 +353,31 @@ func (d *Daemon) createCmd(version string, c Command) {
 }
 
 func (d *Daemon) SetupStorageDriver() error {
-	var err error
-
-	lvmVgName := daemonConfig["storage.lvm_vg_name"].Get()
-	zfsPoolName := daemonConfig["storage.zfs_pool_name"].Get()
-
-	if lvmVgName != "" {
-		d.Storage, err = newStorage(d, storageTypeLvm)
-		if err != nil {
-			shared.LogErrorf("Could not initialize storage type LVM: %s - falling back to dir", err)
-		} else {
+	pools, err := dbStoragePools(d.db)
+	if err != nil {
+		if err == NoSuchObjectError {
+			shared.LogDebugf("No existing storage pools detected.")
 			return nil
 		}
-	} else if zfsPoolName != "" {
-		d.Storage, err = newStorage(d, storageTypeZfs)
+		shared.LogDebugf("Failed to retrieve existing storage pools.")
+		return err
+	}
+
+	for _, pool := range pools {
+		shared.LogDebugf("Initializing and checking storage pool \"%s\".", pool)
+		ps, err := storagePoolInit(d, pool)
 		if err != nil {
-			shared.LogErrorf("Could not initialize storage type ZFS: %s - falling back to dir", err)
-		} else {
-			return nil
+			shared.LogErrorf("Error initializing storage pool \"%s\": %s. Correct functionality of the storage pool cannot be guaranteed.", pool, err)
+			continue
 		}
-	} else if d.BackingFs == "btrfs" {
-		d.Storage, err = newStorage(d, storageTypeBtrfs)
+
+		err = ps.StoragePoolCheck()
 		if err != nil {
-			shared.LogErrorf("Could not initialize storage type btrfs: %s - falling back to dir", err)
-		} else {
-			return nil
+			shared.LogErrorf("Error checking storage pool \"%s\": %s. Correct functionality of the storage pool cannot be guaranteed.", pool, err)
 		}
 	}
 
-	d.Storage, err = newStorage(d, storageTypeDir)
-
-	return err
+	return nil
 }
 
 // have we setup shared mounts?

From 84250e681f9aa3a21c096f7d0e207bb4d4f4962b Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Wed, 11 Jan 2017 15:09:00 +0100
Subject: [PATCH 20/63] lxd/storage: remove obsolete initializers

- newStorage()
- newStorageWithConfig()

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/storage.go | 64 ----------------------------------------------------------
 1 file changed, 64 deletions(-)

diff --git a/lxd/storage.go b/lxd/storage.go
index 2daadd1..446ad9f 100644
--- a/lxd/storage.go
+++ b/lxd/storage.go
@@ -247,11 +247,6 @@ type storage interface {
 	MigrationSink(live bool, container container, objects []*Snapshot, conn *websocket.Conn, srcIdmap *shared.IdmapSet, op *operation) error
 }
 
-func newStorage(d *Daemon, sType storageType) (storage, error) {
-	var nilmap map[string]interface{}
-	return newStorageWithConfig(d, sType, nilmap)
-}
-
 func storageWrapperInit(d *Daemon, poolName string, volumeName string, volumeType int) (*storageLogWrapper, error) {
 	var s storageLogWrapper
 
@@ -399,65 +394,6 @@ func storagePoolVolumeInit(d *Daemon, poolName string, volumeName string, volume
 	return storage, nil
 }
 
-func newStorageWithConfig(d *Daemon, sType storageType, config map[string]interface{}) (storage, error) {
-	if d.MockMode {
-		return d.Storage, nil
-	}
-
-	var s storage
-
-	switch sType {
-	case storageTypeBtrfs:
-		if d.Storage != nil && d.Storage.GetStorageType() == storageTypeBtrfs {
-			return d.Storage, nil
-		}
-
-		btrfs := storageBtrfs{}
-		btrfs.pool = &api.StoragePool{}
-		btrfs.d = d
-		s = &storageLogWrapper{w: &btrfs}
-	case storageTypeZfs:
-		if d.Storage != nil && d.Storage.GetStorageType() == storageTypeZfs {
-			return d.Storage, nil
-		}
-
-		zfs := storageZfs{}
-		zfs.pool = &api.StoragePool{}
-		zfs.d = d
-		s = &storageLogWrapper{w: &zfs}
-	case storageTypeLvm:
-		if d.Storage != nil && d.Storage.GetStorageType() == storageTypeLvm {
-			return d.Storage, nil
-		}
-
-		lvm := storageLvm{}
-		lvm.pool = &api.StoragePool{}
-		lvm.d = d
-		s = &storageLogWrapper{w: &lvm}
-	default:
-		if d.Storage != nil && d.Storage.GetStorageType() == storageTypeDir {
-			return d.Storage, nil
-		}
-
-		dir := storageDir{}
-		dir.pool = &api.StoragePool{}
-		dir.d = d
-		s = &storageLogWrapper{w: &dir}
-	}
-
-	storage, err := s.StoragePoolInit(config)
-	if err != nil {
-		return nil, err
-	}
-
-	err = s.StoragePoolCheck()
-	if err != nil {
-		return nil, err
-	}
-
-	return storage, nil
-}
-
 type storageCore struct {
 	sType        storageType
 	sTypeName    string

From 932e0d142eaf5bef0e97927a332db7505b8d2251 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Wed, 11 Jan 2017 15:17:05 +0100
Subject: [PATCH 21/63] images: pass storage type to unpack

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/images.go        | 10 +++++-----
 lxd/storage_btrfs.go |  2 +-
 lxd/storage_dir.go   |  2 +-
 lxd/storage_lvm.go   |  2 +-
 lxd/storage_zfs.go   |  2 +-
 5 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/lxd/images.go b/lxd/images.go
index 9aea16b..a283c28 100644
--- a/lxd/images.go
+++ b/lxd/images.go
@@ -80,7 +80,7 @@ func detectCompression(fname string) ([]string, string, error) {
 
 }
 
-func unpack(d *Daemon, file string, path string) error {
+func unpack(d *Daemon, file string, path string, sType storageType) error {
 	extractArgs, extension, err := detectCompression(file)
 	if err != nil {
 		return err
@@ -129,7 +129,7 @@ func unpack(d *Daemon, file string, path string) error {
 
 		// Check if we're running out of space
 		if int64(fs.Bfree) < int64(2*fs.Bsize) {
-			if d.Storage.GetStorageType() == storageTypeLvm {
+			if sType == storageTypeLvm {
 				return fmt.Errorf("Unable to unpack image, run out of disk space (consider increasing storage.lvm_volume_size).")
 			} else {
 				return fmt.Errorf("Unable to unpack image, run out of disk space.")
@@ -149,8 +149,8 @@ func unpack(d *Daemon, file string, path string) error {
 	return nil
 }
 
-func unpackImage(d *Daemon, imagefname string, destpath string) error {
-	err := unpack(d, imagefname, destpath)
+func unpackImage(d *Daemon, imagefname string, destpath string, sType storageType) error {
+	err := unpack(d, imagefname, destpath, sType)
 	if err != nil {
 		return err
 	}
@@ -162,7 +162,7 @@ func unpackImage(d *Daemon, imagefname string, destpath string) error {
 			return fmt.Errorf("Error creating rootfs directory")
 		}
 
-		err = unpack(d, imagefname+".rootfs", rootfsPath)
+		err = unpack(d, imagefname+".rootfs", rootfsPath, sType)
 		if err != nil {
 			return err
 		}
diff --git a/lxd/storage_btrfs.go b/lxd/storage_btrfs.go
index 0119c49..88ec8ba 100644
--- a/lxd/storage_btrfs.go
+++ b/lxd/storage_btrfs.go
@@ -485,7 +485,7 @@ func (s *storageBtrfs) ImageCreate(fingerprint string) error {
 		return err
 	}
 
-	if err := unpackImage(s.d, imagePath, subvol); err != nil {
+	if err := unpackImage(s.d, imagePath, subvol, storageTypeBtrfs); err != nil {
 		s.btrfsPoolVolumeDelete(subvol)
 		return err
 	}
diff --git a/lxd/storage_dir.go b/lxd/storage_dir.go
index 936e357..e936c24 100644
--- a/lxd/storage_dir.go
+++ b/lxd/storage_dir.go
@@ -133,7 +133,7 @@ func (s *storageDir) ContainerCreateFromImage(
 	}
 
 	imagePath := shared.VarPath("images", imageFingerprint)
-	if err := unpackImage(s.d, imagePath, container.Path()); err != nil {
+	if err := unpackImage(s.d, imagePath, container.Path(), storageTypeDir); err != nil {
 		s.ContainerDelete(container)
 		return err
 	}
diff --git a/lxd/storage_lvm.go b/lxd/storage_lvm.go
index 0bc9b71..7dc72b8 100644
--- a/lxd/storage_lvm.go
+++ b/lxd/storage_lvm.go
@@ -795,7 +795,7 @@ func (s *storageLvm) ImageCreate(fingerprint string) error {
 		return fmt.Errorf("Error mounting image LV: %v", err)
 	}
 
-	unpackErr := unpackImage(s.d, finalName, tempLVMountPoint)
+	unpackErr := unpackImage(s.d, finalName, tempLVMountPoint, storageTypeLvm)
 
 	err = tryUnmount(tempLVMountPoint, 0)
 	if err != nil {
diff --git a/lxd/storage_zfs.go b/lxd/storage_zfs.go
index 3fe22d7..13c2e44 100644
--- a/lxd/storage_zfs.go
+++ b/lxd/storage_zfs.go
@@ -817,7 +817,7 @@ func (s *storageZfs) ImageCreate(fingerprint string) error {
 	}
 
 	// Unpack the image into the temporary mountpoint.
-	err = unpackImage(s.d, imagePath, tmpImageDir)
+	err = unpackImage(s.d, imagePath, tmpImageDir, storageTypeZfs)
 	if err != nil {
 		return cleanup(err)
 	}

From 12b503906d9ce3b41efd1463ab7307ce32265878 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Wed, 11 Jan 2017 15:17:55 +0100
Subject: [PATCH 22/63] daemon: remove storage argument

The daemon-global Storage field is not needed anymore.
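
Container import now picks the pool for the container's storage volume
as follows: the pool given in the request, else the configured default
pool, else the first pool in the database. A sketch of the fallback
(names as in the hunk below):

    sf.Container.StoragePool = daemonConfig["storage.default_pool"].Get()
    if sf.Container.StoragePool == "" {
        sf.Container.StoragePool = pools[0]
    }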

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/api_internal.go | 36 ++++++++++++++++++++++++++++++------
 lxd/daemon.go       |  2 --
 2 files changed, 30 insertions(+), 8 deletions(-)

diff --git a/lxd/api_internal.go b/lxd/api_internal.go
index 0fc97f4..b77de0b 100644
--- a/lxd/api_internal.go
+++ b/lxd/api_internal.go
@@ -122,19 +122,41 @@ func internalImport(d *Daemon, r *http.Request) Response {
 		return BadRequest(fmt.Errorf("target is required"))
 	}
 
-	path := containerPath(name, false)
-	err := d.Storage.ContainerStart(name, path)
-	if err != nil {
-		return SmartError(err)
+	// COMMENT(brauner): Check if we received a storage pool on which the
+	// storage volume for the container is supposed to be created.
+	poolName := mux.Vars(r)["pool_name"]
+	if poolName == "" {
+		return BadRequest(fmt.Errorf("pool name is required"))
 	}
 
-	defer d.Storage.ContainerStop(name, path)
-
 	sf, err := slurpBackupFile(shared.VarPath("containers", name, "backup.yaml"))
 	if err != nil {
 		return SmartError(err)
 	}
 
+	// COMMENT(brauner): Override the original storage pool where the
+	// container once existed.
+	sf.Container.StoragePool = poolName
+	if sf.Container.StoragePool == "" {
+		// COMMENT(brauner): If no storage pool is found, error out.
+		pools, err := dbStoragePools(d.db)
+		if err != nil && err != NoSuchObjectError {
+			return InternalError(err)
+		}
+
+		if len(pools) == 0 {
+			return BadRequest(fmt.Errorf("No storage pool found. Please create a new storage pool."))
+		}
+
+		// COMMENT(brauner): If we receive no storage pool, check
+		// whether a default storage pool is set. If no default storage
+		// pool is set, fall back to the first pool in the list.
+		sf.Container.StoragePool = daemonConfig["storage.default_pool"].Get()
+		if sf.Container.StoragePool == "" {
+			sf.Container.StoragePool = pools[0]
+		}
+	}
+
 	baseImage := sf.Container.Config["volatile.base_image"]
 	for k := range sf.Container.Config {
 		if strings.HasPrefix(k, "volatile") {
@@ -158,6 +180,7 @@ func internalImport(d *Daemon, r *http.Request) Response {
 		Name:         sf.Container.Name,
 		Profiles:     sf.Container.Profiles,
 		Stateful:     sf.Container.Stateful,
+		StoragePool:  sf.Container.StoragePool,
 	})
 	if err != nil {
 		return SmartError(err)
@@ -188,6 +211,7 @@ func internalImport(d *Daemon, r *http.Request) Response {
 			Name:         snap.Name,
 			Profiles:     snap.Profiles,
 			Stateful:     snap.Stateful,
+			StoragePool:  sf.Container.StoragePool,
 		})
 		if err != nil {
 			return SmartError(err)
diff --git a/lxd/daemon.go b/lxd/daemon.go
index adf65a4..18b2dc3 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -80,8 +80,6 @@ type Daemon struct {
 	shutdownChan        chan bool
 	resetAutoUpdateChan chan bool
 
-	Storage storage
-
 	TCPSocket  *Socket
 	UnixSocket *Socket
 

From 160a97ae77a0310d7593298fbd379b96d98bb416 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Thu, 12 Jan 2017 12:51:59 +0100
Subject: [PATCH 23/63] lxd/container: add "pool" key to root disk device

The "pool" key identifies the storage pool a container's storage volume belongs
to. For now we do not allow the storage pool of a container's storage volume to
change. This is future work!
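
A container's root disk device now records the pool it lives on. A
minimal sketch of the resulting device entry (the map literal mirrors
the hunk below; the device name "root" is illustrative, the patch
derives the name from the existing devices):

    c.localDevices["root"] = types.Device{
        "type": "disk",
        "path": "/",
        "pool": args.StoragePool,
    }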

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/container.go     |  2 ++
 lxd/container_lxc.go | 54 +++++++++++++++++++++++++++++++++++-----------------
 2 files changed, 39 insertions(+), 17 deletions(-)

diff --git a/lxd/container.go b/lxd/container.go
index 319361f..fe46a8a 100644
--- a/lxd/container.go
+++ b/lxd/container.go
@@ -135,6 +135,8 @@ func containerValidDeviceConfigKey(t, k string) bool {
 			return true
 		case "recursive":
 			return true
+		case "pool":
+			return true
 		default:
 			return false
 		}
diff --git a/lxd/container_lxc.go b/lxd/container_lxc.go
index 409617c..3ddebaa 100644
--- a/lxd/container_lxc.go
+++ b/lxd/container_lxc.go
@@ -239,7 +239,7 @@ func containerLXCCreate(d *Daemon, args containerArgs) (container, error) {
 			deviceName += "_"
 		}
 
-		c.localDevices[deviceName] = types.Device{"type": "disk", "path": "/"}
+		c.localDevices[deviceName] = types.Device{"type": "disk", "path": "/", "pool": args.StoragePool}
 
 		updateArgs := containerArgs{
 			Architecture: c.architecture,
@@ -368,6 +368,18 @@ func containerLXCLoad(d *Daemon, args containerArgs) (container, error) {
 	}
 	c.storage = s
 
+	// COMMENT(brauner): args.StoragePool can be empty here. In that case we
+	// simply set c.storagePool to the storage pool we detected based on the
+	// container's name. (Container names are globally unique.)
+	if args.StoragePool == "" {
+		c.storagePool = c.storage.GetContainerPool()
+	} else if args.StoragePool != c.storage.GetContainerPool() {
+		// COMMENT(brauner): If the storage pool passed in does not
+		// match the storage pool we reverse engineered based on the
+		// container's name, we know something is messed up.
+		return nil, fmt.Errorf("Container is supposed to exist on storage pool \"%s\", but it actually exists on \"%s\".", args.StoragePool, c.storage.GetContainerPool())
+	}
+
 	// Load the config
 	err = c.init()
 	if err != nil {
@@ -3149,25 +3161,33 @@ func (c *containerLXC) Update(args containerArgs, userRequested bool) error {
 		}
 	}
 
-	// Apply the live changes
-	if c.IsRunning() {
-		// Confirm that the rootfs source didn't change
-		var oldRootfs types.Device
-		for _, m := range oldExpandedDevices {
-			if m["type"] == "disk" && m["path"] == "/" {
-				oldRootfs = m
-				break
-			}
+	// Confirm that the storage pool didn't change.
+	var oldRootfs types.Device
+	for _, m := range oldExpandedDevices {
+		if m["type"] == "disk" && m["path"] == "/" {
+			oldRootfs = m
+			break
 		}
+	}
 
-		var newRootfs types.Device
-		for _, name := range c.expandedDevices.DeviceNames() {
-			m := c.expandedDevices[name]
-			if m["type"] == "disk" && m["path"] == "/" {
-				newRootfs = m
-				break
-			}
+	var newRootfs types.Device
+	for _, name := range c.expandedDevices.DeviceNames() {
+		m := c.expandedDevices[name]
+		if m["type"] == "disk" && m["path"] == "/" {
+			newRootfs = m
+			break
 		}
+	}
+
+	// TODO(brauner): Allow users to change the pool a container
+	// belongs to by editing the "pool" config key of the "root"
+	// device.
+	if oldRootfs["pool"] != newRootfs["pool"] {
+		return fmt.Errorf("Changing the storage pool of a container is not yet implemented.")
+	}
+
+	// Apply the live changes
+	if c.IsRunning() {
 
 		if oldRootfs["source"] != newRootfs["source"] {
 			return fmt.Errorf("Cannot change the rootfs path of a running container")

From 58618d9bc047cafe3f92810bab75a85ea3eec1f7 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Fri, 13 Jan 2017 00:18:38 +0100
Subject: [PATCH 24/63] lxd/container: implement attaching storage volumes
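
Attaching a volume amounts to a disk device that names a volume on a
pool instead of a host path. A sketch, assuming a pool "pool1" and a
custom volume "vol1" (the keys are the ones validated in the hunks
below):

    c.localDevices["data"] = types.Device{
        "type":   "disk",
        "path":   "/mnt/data",
        "pool":   "pool1",
        "source": "vol1", // or "<type>/<name>", e.g. "custom/vol1"
    }

Note that "source" is a volume name here, not a path: absolute paths
are rejected whenever "pool" is set.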

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/container.go     | 11 +++++++++
 lxd/container_lxc.go | 64 +++++++++++++++++++++++++++++++++++++++++++++++----
 lxd/storage.go       | 10 ++++++++
 lxd/storage_btrfs.go |  8 +++++++
 lxd/storage_dir.go   |  8 +++++++
 lxd/storage_lvm.go   |  8 +++++++
 lxd/storage_zfs.go   | 65 ++++++++++++++++++++++++++++++++++++++++++++++++----
 7 files changed, 164 insertions(+), 10 deletions(-)

diff --git a/lxd/container.go b/lxd/container.go
index fe46a8a..4acc8db 100644
--- a/lxd/container.go
+++ b/lxd/container.go
@@ -5,6 +5,7 @@ import (
 	"io"
 	"os"
 	"os/exec"
+	"path/filepath"
 	"strings"
 	"time"
 
@@ -274,6 +275,16 @@ func containerValidDevices(devices types.Devices, profile bool, expanded bool) e
 			if (m["path"] == "/" || !shared.IsDir(m["source"])) && m["recursive"] != "" {
 				return fmt.Errorf("The recursive option is only supported for additional bind-mounted paths.")
 			}
+
+			if m["pool"] != "" {
+				if storageValidName(m["pool"]) != nil {
+					return fmt.Errorf("The specified storage pool name is not valid.")
+				}
+				if filepath.IsAbs(m["source"]) {
+					return fmt.Errorf("Storage volumes cannot be specified as absolute paths.")
+				}
+			}
+
 		} else if shared.StringInSlice(m["type"], []string{"unix-char", "unix-block"}) {
 			if m["path"] == "" {
 				return fmt.Errorf("Unix device entry is missing the required \"path\" property.")
diff --git a/lxd/container_lxc.go b/lxd/container_lxc.go
index 3ddebaa..8862892 100644
--- a/lxd/container_lxc.go
+++ b/lxd/container_lxc.go
@@ -362,6 +362,7 @@ func containerLXCLoad(d *Daemon, args containerArgs) (container, error) {
 		storagePool:  args.StoragePool,
 	}
 
+	// COMMENT(brauner): Initialize storage interface for this container.
 	s, err := storagePoolVolumeContainerLoadInit(d, args.Name)
 	if err != nil {
 		return nil, err
@@ -1311,7 +1312,14 @@ func (c *containerLXC) initLXC() error {
 			isOptional := shared.IsTrue(m["optional"])
 			isReadOnly := shared.IsTrue(m["readonly"])
 			isRecursive := shared.IsTrue(m["recursive"])
-			isFile := !shared.IsDir(srcPath) && !deviceIsBlockdev(srcPath)
+
+			// COMMENT(brauner): If we want to mount a storage
+			// volume from a storage pool we created via our storage
+			// api, we are always mounting a directory.
+			isFile := false
+			if m["pool"] == "" {
+				isFile = !shared.IsDir(srcPath) && !deviceIsBlockdev(srcPath)
+			}
 
 			// Deal with a rootfs
 			if tgtPath == "" {
@@ -1500,7 +1508,12 @@ func (c *containerLXC) startCommon() (string, error) {
 		m := c.expandedDevices[name]
 		switch m["type"] {
 		case "disk":
-			if m["source"] != "" && !shared.PathExists(m["source"]) {
+			// COMMENT(brauner): When we want to attach a storage
+			// volume created via the storage api m["source"] only
+			// contains the name of the storage volume, not the path
+			// where it is mounted. So only check for the
+			// existence of m["source"] when m["pool"] is empty.
+			if m["pool"] == "" && m["source"] != "" && !shared.PathExists(m["source"]) {
 				return "", fmt.Errorf("Missing source '%s' for disk '%s'", m["source"], name)
 			}
 		case "nic":
@@ -3182,13 +3195,12 @@ func (c *containerLXC) Update(args containerArgs, userRequested bool) error {
 	// TODO(brauner): Allow users to change the pool a container
 	// belongs to by editing the "pool" config key of the "root"
 	// device.
-	if oldRootfs["pool"] != newRootfs["pool"] {
+	if oldRootfs["pool"] != "" && (oldRootfs["pool"] != newRootfs["pool"]) {
 		return fmt.Errorf("Changing the storage pool of a container is not yet implemented.")
 	}
 
 	// Apply the live changes
 	if c.IsRunning() {
-
 		if oldRootfs["source"] != newRootfs["source"] {
 			return fmt.Errorf("Cannot change the rootfs path of a running container")
 		}
@@ -5727,7 +5739,49 @@ func (c *containerLXC) createDiskDevice(name string, m types.Device) (string, er
 	isOptional := shared.IsTrue(m["optional"])
 	isReadOnly := shared.IsTrue(m["readonly"])
 	isRecursive := shared.IsTrue(m["recursive"])
-	isFile := !shared.IsDir(srcPath) && !deviceIsBlockdev(srcPath)
+
+	isFile := false
+	if m["pool"] == "" {
+		isFile = !shared.IsDir(srcPath) && !deviceIsBlockdev(srcPath)
+	}
+
+	// COMMENT(brauner): Deal with mounting storage volumes created via the
+	// storage api.
+	if m["pool"] != "" {
+		// COMMENT(brauner): Extract the name of the storage volume that
+		// we are supposed to attach. We assume that the only
+		// syntactically valid ways of specifying a storage volume are:
+		// - vol1
+		// - storage_type/vol1
+		volumeName := filepath.Clean(m["source"])
+		slash := strings.Index(volumeName, "/")
+		if (slash > 0) && (len(volumeName) > slash) {
+			volumeName = volumeName[(slash + 1):]
+		}
+
+		// COMMENT(brauner): Check if it is the rootfs of another
+		// container that we're supposed to mount. If not it must be a
+		// custom volume.
+		volumeType := storagePoolVolumeTypeCustom
+		if strings.HasPrefix(m["source"], storagePoolVolumeApiEndpointContainers+"/") {
+			volumeType = storagePoolVolumeTypeContainer
+			srcPath = shared.VarPath("storage-pools", m["pool"], m["source"])
+		} else {
+			srcPath = shared.VarPath("storage-pools", m["pool"], storagePoolVolumeApiEndpointCustom, volumeName)
+		}
+
+		// COMMENT(brauner): Initialize a new storage interface and
+		// check if the pool/volume is mounted. If it is not, mount it.
+		s, err := storagePoolVolumeInit(c.daemon, m["pool"], volumeName, volumeType)
+		if err != nil && !isOptional {
+			return "", fmt.Errorf("Failed to initialize storage volume \"%s\" on storage pool \"%s\": %s.", volumeName, m["pool"], err)
+		} else if err == nil {
+			err := s.StoragePoolVolumeMount()
+			if err != nil {
+				shared.LogWarnf("Could not mount storage volume \"%s\" on storage pool \"%s\": %s.", volumeName, m["pool"], err)
+			}
+		}
+	}
 
 	// Check if the source exists
 	if !shared.PathExists(srcPath) {
diff --git a/lxd/storage.go b/lxd/storage.go
index 446ad9f..45bbbc7 100644
--- a/lxd/storage.go
+++ b/lxd/storage.go
@@ -185,6 +185,8 @@ type storage interface {
 	// Functions dealing with storage volumes.
 	StoragePoolVolumeCreate() error
 	StoragePoolVolumeDelete() error
+	StoragePoolVolumeMount() error
+	StoragePoolVolumeUmount() error
 	StoragePoolVolumeUpdate(changedConfig []string) error
 	GetStoragePoolVolumeWritable() api.StorageVolumePut
 	SetStoragePoolVolumeWritable(writable *api.StorageVolumePut)
@@ -545,6 +547,14 @@ func (lw *storageLogWrapper) StoragePoolVolumeDelete() error {
 	return lw.w.StoragePoolVolumeDelete()
 }
 
+func (lw *storageLogWrapper) StoragePoolVolumeMount() error {
+	return lw.w.StoragePoolVolumeMount()
+}
+
+func (lw *storageLogWrapper) StoragePoolVolumeUmount() error {
+	return lw.w.StoragePoolVolumeUmount()
+}
+
 func (lw *storageLogWrapper) StoragePoolDelete() error {
 	return lw.w.StoragePoolDelete()
 }
diff --git a/lxd/storage_btrfs.go b/lxd/storage_btrfs.go
index 88ec8ba..d22a281 100644
--- a/lxd/storage_btrfs.go
+++ b/lxd/storage_btrfs.go
@@ -89,6 +89,14 @@ func (s *storageBtrfs) StoragePoolVolumeDelete() error {
 	return nil
 }
 
+func (s *storageBtrfs) StoragePoolVolumeMount() error {
+	return nil
+}
+
+func (s *storageBtrfs) StoragePoolVolumeUmount() error {
+	return nil
+}
+
 func (s *storageBtrfs) GetStoragePoolWritable() api.StoragePoolPut {
 	return s.pool.Writable()
 }
diff --git a/lxd/storage_dir.go b/lxd/storage_dir.go
index e936c24..8b8e361 100644
--- a/lxd/storage_dir.go
+++ b/lxd/storage_dir.go
@@ -71,6 +71,14 @@ func (s *storageDir) StoragePoolVolumeDelete() error {
 	return nil
 }
 
+func (s *storageDir) StoragePoolVolumeMount() error {
+	return nil
+}
+
+func (s *storageDir) StoragePoolVolumeUmount() error {
+	return nil
+}
+
 func (s *storageDir) GetStoragePoolWritable() api.StoragePoolPut {
 	return s.pool.Writable()
 }
diff --git a/lxd/storage_lvm.go b/lxd/storage_lvm.go
index 7dc72b8..974514d 100644
--- a/lxd/storage_lvm.go
+++ b/lxd/storage_lvm.go
@@ -268,6 +268,14 @@ func (s *storageLvm) StoragePoolVolumeDelete() error {
 	return nil
 }
 
+func (s *storageLvm) StoragePoolVolumeMount() error {
+	return nil
+}
+
+func (s *storageLvm) StoragePoolVolumeUmount() error {
+	return nil
+}
+
 func (s *storageLvm) GetStoragePoolWritable() api.StoragePoolPut {
 	return s.pool.Writable()
 }
diff --git a/lxd/storage_zfs.go b/lxd/storage_zfs.go
index 13c2e44..0d1030a 100644
--- a/lxd/storage_zfs.go
+++ b/lxd/storage_zfs.go
@@ -137,7 +137,7 @@ func (s *storageZfs) StoragePoolCreate() error {
 
 func (s *storageZfs) StoragePoolVolumeCreate() error {
 	fs := fmt.Sprintf("custom/%s", s.volume.VolumeName)
-	fsMountpoint := fmt.Sprintf("%s.zfs", shared.VarPath(fs))
+	fsMountpoint := fmt.Sprintf("%s@%s", shared.VarPath(fs), s.pool.PoolName)
 
 	err := s.zfsPoolVolumeCreate(fs)
 	if err != nil {
@@ -167,11 +167,66 @@ func (s *storageZfs) StoragePoolDelete() error {
 
 func (s *storageZfs) StoragePoolVolumeDelete() error {
 	fs := fmt.Sprintf("custom/%s", s.volume.VolumeName)
+	fsMountpoint := fmt.Sprintf("%s@%s", shared.VarPath(fs), s.pool.PoolName)
+
 	err := s.zfsPoolVolumeDestroy(fs)
 	if err != nil {
 		return err
 	}
 
+	if shared.PathExists(fsMountpoint) {
+		err := os.RemoveAll(fsMountpoint)
+		if err != nil {
+			shared.LogWarnf("Failed to remove mountpoint \"%s\" for storage volume \"%s\".", fsMountpoint, s.volume.VolumeName)
+		}
+	}
+
+	return nil
+}
+
+func (s *storageZfs) StoragePoolVolumeMount() error {
+	volApiEndpoint, err := storagePoolVolumeTypeNameToApiEndpoint(s.volume.VolumeType)
+	if err != nil {
+		return err
+	}
+
+	fs := fmt.Sprintf("%s/%s", volApiEndpoint, s.volume.VolumeName)
+	fsMountpoint := shared.VarPath(fs)
+
+	if s.volume.VolumeType == storagePoolVolumeTypeNameCustom {
+		fsMountpoint = fmt.Sprintf("%s@%s", fsMountpoint, s.pool.PoolName)
+	}
+
+	if !shared.IsMountPoint(fsMountpoint) {
+		err := s.zfsPoolVolumeMount(fs)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (s *storageZfs) StoragePoolVolumeUmount() error {
+	volApiEndpoint, err := storagePoolVolumeTypeNameToApiEndpoint(s.volume.VolumeType)
+	if err != nil {
+		return err
+	}
+
+	fs := fmt.Sprintf("%s/%s", volApiEndpoint, s.volume.VolumeName)
+	fsMountpoint := shared.VarPath(fs)
+
+	if s.volume.VolumeType == storagePoolVolumeTypeNameCustom {
+		fsMountpoint = fmt.Sprintf("%s@%s", fsMountpoint, s.pool.PoolName)
+	}
+
+	if shared.IsMountPoint(fsMountpoint) {
+		err := s.zfsPoolVolumeUmount(fs)
+		if err != nil {
+			return err
+		}
+	}
+
 	return nil
 }
 
@@ -483,7 +538,7 @@ func (s *storageZfs) ContainerRename(container container, newName string) error
 	oldName := container.Name()
 
 	// Unmount the filesystem
-	err := s.zfsPoolVolumeUnmount(fmt.Sprintf("containers/%s", oldName))
+	err := s.zfsPoolVolumeUmount(fmt.Sprintf("containers/%s", oldName))
 	if err != nil {
 		return err
 	}
@@ -836,7 +891,7 @@ func (s *storageZfs) ImageCreate(fingerprint string) error {
 
 	// Make sure that the image actually got unmounted.
 	if shared.IsMountPoint(tmpImageDir) {
-		s.zfsPoolVolumeUnmount(fs)
+		s.zfsPoolVolumeUmount(fs)
 	}
 
 	// Create a snapshot of that image on the storage pool which we clone for
@@ -1338,7 +1393,7 @@ func (s *storageZfs) zfsPoolVolumeMount(path string) error {
 	return nil
 }
 
-func (s *storageZfs) zfsPoolVolumeUnmount(path string) error {
+func (s *storageZfs) zfsPoolVolumeUmount(path string) error {
 	output, err := tryExec(
 		"zfs",
 		"unmount",
@@ -1742,7 +1797,7 @@ func (s *storageZfs) MigrationSink(live bool, container container, snapshots []*
 	 * unmounted, so we do this before receiving anything.
 	 */
 	zfsName := fmt.Sprintf("containers/%s", container.Name())
-	err := s.zfsPoolVolumeUnmount(zfsName)
+	err := s.zfsPoolVolumeUmount(zfsName)
 	if err != nil {
 		return err
 	}

From 37f986c12777c3f0c0d0e0b992bb0fb045835121 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Sat, 14 Jan 2017 13:32:23 +0100
Subject: [PATCH 25/63] lxd/storage_pools*: implement used_by

This commit implements retrieving all users of a given storage pool.
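
Each entry is the API path of a volume on the pool, built as
/<api-version>/<endpoint>/<volume-name>. Illustrative output (the
values are assumed):

    "used_by": [
        "/1.0/containers/c1",
        "/1.0/snapshots/c1/snap0"
    ]

Container volumes whose name contains the snapshot delimiter are
reported under "snapshots".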

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/storage_pools.go       | 19 +++++++++++++++++--
 lxd/storage_pools_utils.go | 32 ++++++++++++++++++++++++++++++++
 2 files changed, 49 insertions(+), 2 deletions(-)

diff --git a/lxd/storage_pools.go b/lxd/storage_pools.go
index 3afc072..6615aae 100644
--- a/lxd/storage_pools.go
+++ b/lxd/storage_pools.go
@@ -31,10 +31,18 @@ func storagePoolsGet(d *Daemon, r *http.Request) Response {
 		if recursion == 0 {
 			resultString = append(resultString, fmt.Sprintf("/%s/storage-pools/%s", version.APIVersion, pool))
 		} else {
-			_, pl, err := dbStoragePoolGet(d.db, pool)
+			plID, pl, err := dbStoragePoolGet(d.db, pool)
 			if err != nil {
 				continue
 			}
+
+			// COMMENT(brauner): Get all users of the storage pool.
+			poolUsedBy, err := storagePoolUsedByGet(d.db, plID)
+			if err != nil && err != NoSuchObjectError {
+				return SmartError(err)
+			}
+			pl.PoolUsedBy = poolUsedBy
+
 			resultMap = append(resultMap, *pl)
 		}
 	}
@@ -152,11 +160,18 @@ func storagePoolGet(d *Daemon, r *http.Request) Response {
 	}
 
 	// Get the existing storage pool.
-	_, pool, err := dbStoragePoolGet(d.db, poolName)
+	poolID, pool, err := dbStoragePoolGet(d.db, poolName)
 	if err != nil {
 		return SmartError(err)
 	}
 
+	// COMMENT(brauner): Get all users of the storage pool.
+	poolUsedBy, err := storagePoolUsedByGet(d.db, poolID)
+	if err != nil && err != NoSuchObjectError {
+		return SmartError(err)
+	}
+	pool.PoolUsedBy = poolUsedBy
+
 	etag := []interface{}{pool.PoolName, pool.PoolUsedBy, pool.PoolConfig}
 
 	return SyncResponseETag(true, &pool, etag)
diff --git a/lxd/storage_pools_utils.go b/lxd/storage_pools_utils.go
index 2f4ff4d..1744f89 100644
--- a/lxd/storage_pools_utils.go
+++ b/lxd/storage_pools_utils.go
@@ -1,7 +1,12 @@
 package main
 
 import (
+	"database/sql"
+	"fmt"
+	"strings"
+
 	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/version"
 )
 
 func storagePoolUpdate(d *Daemon, name string, newConfig map[string]string) error {
@@ -61,3 +66,30 @@ func storagePoolUpdate(d *Daemon, name string, newConfig map[string]string) erro
 
 	return nil
 }
+
+func storagePoolUsedByGet(db *sql.DB, poolID int64) ([]string, error) {
+	poolVolumes, err := dbStoragePoolVolumesGet(db, poolID)
+	if err != nil {
+		return []string{}, err
+	}
+
+	poolUsedBy := make([]string, len(poolVolumes))
+
+	for i := 0; i < len(poolVolumes); i++ {
+		apiEndpoint, err := storagePoolVolumeTypeNameToApiEndpoint(poolVolumes[i].VolumeType)
+		if err != nil {
+			shared.LogErrorf("Could not determine storage type for storage volume \"%s\".", poolVolumes[i].VolumeName)
+			// COMMENT(brauner): Should not happen.
+			apiEndpoint = "INVALID"
+		} else if apiEndpoint == storagePoolVolumeApiEndpointContainers {
+			// Snapshots of containers are reported under their own endpoint.
+			if strings.Index(poolVolumes[i].VolumeName, shared.SnapshotDelimiter) > 0 {
+				apiEndpoint = "snapshots"
+			}
+		}
+
+		poolUsedBy[i] = fmt.Sprintf("/%s/%s/%s", version.APIVersion, apiEndpoint, poolVolumes[i].VolumeName)
+	}
+
+	return poolUsedBy, nil
+}

From 7c036c068d8890e512edae7e32dc566140905ded Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Sun, 15 Jan 2017 08:48:04 +0100
Subject: [PATCH 26/63] lxd/container*: add initStorage()

Initializing a new storage interface is costly, and there are plenty of places
where we don't need to access storage at all. So let's define a storage
initializer that can be called on demand.
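
The initializer is a plain nil-check guard, so repeated calls are
cheap. A trimmed sketch of the shape added below:

    func (c *containerLXC) initStorage() error {
        if c.storage != nil {
            return nil
        }

        s, err := storagePoolVolumeContainerLoadInit(c.daemon, c.Name())
        if err != nil {
            return err
        }
        c.storage = s

        // Pool consistency check elided; see the hunk below.
        return nil
    }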

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/container_lxc.go | 105 +++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 81 insertions(+), 24 deletions(-)

diff --git a/lxd/container_lxc.go b/lxd/container_lxc.go
index 8862892..0a72304 100644
--- a/lxd/container_lxc.go
+++ b/lxd/container_lxc.go
@@ -362,27 +362,8 @@ func containerLXCLoad(d *Daemon, args containerArgs) (container, error) {
 		storagePool:  args.StoragePool,
 	}
 
-	// COMMENT(brauner): Initialize storage interface for this container.
-	s, err := storagePoolVolumeContainerLoadInit(d, args.Name)
-	if err != nil {
-		return nil, err
-	}
-	c.storage = s
-
-	// COMMENT(brauner): args.StoragePool can be empty here. In that case we
-	// simply set c.storagePool to the storage pool we detected based on the
-	// container's name. (Container names are globally unique.)
-	if args.StoragePool == "" {
-		c.storagePool = c.storage.GetContainerPool()
-	} else if args.StoragePool != c.storage.GetContainerPool() {
-		// COMMENT(brauner): If the storage pool passed in does not
-		// match the storage pool we reverse engineered based on the
-	// container's name, we know something is messed up.
-		return nil, fmt.Errorf("Container is supposed to exist on storage pool \"%s\", but it actually exists on \"%s\".", args.StoragePool, c.storage.GetContainerPool())
-	}
-
 	// Load the config
-	err = c.init()
+	err := c.init()
 	if err != nil {
 		return nil, err
 	}
@@ -1404,6 +1385,34 @@ func (c *containerLXC) initLXC() error {
 	return nil
 }
 
+// COMMENT(brauner): Initialize storage interface for this container.
+func (c *containerLXC) initStorage() error {
+	if c.storage != nil {
+		return nil
+	}
+
+	s, err := storagePoolVolumeContainerLoadInit(c.daemon, c.Name())
+	if err != nil {
+		return err
+	}
+	c.storage = s
+
+	// COMMENT(brauner): args.StoragePool can be empty here. In that case we
+	// simply set c.storagePool to the storage pool we detected based on the
+	// container's name. (Container names are globally unique.)
+	storagePool := c.storage.ContainerPoolGet()
+	if c.storagePool == "" {
+		c.storagePool = storagePool
+	} else if c.storagePool != storagePool {
+		// COMMENT(brauner): If the storage pool passed in does not
+		// match the storage pool we reverse engineered based on the
+	// container's name, we know something is messed up.
+		return fmt.Errorf("Container is supposed to exist on storage pool \"%s\", but it actually exists on \"%s\".", c.storagePool, storagePool)
+	}
+
+	return nil
+}
+
 // Config handling
 func (c *containerLXC) expandConfig() error {
 	config := map[string]string{}
@@ -2536,8 +2545,14 @@ func (c *containerLXC) Snapshots() ([]container, error) {
 func (c *containerLXC) Restore(sourceContainer container) error {
 	var ctxMap log.Ctx
 
+	// COMMENT(brauner): Initialize storage interface for the container.
+	err := c.initStorage()
+	if err != nil {
+		return err
+	}
+
 	// Check if we can restore the container
-	err := c.storage.ContainerCanRestore(c, sourceContainer)
+	err = c.storage.ContainerCanRestore(c, sourceContainer)
 	if err != nil {
 		return err
 	}
@@ -2658,6 +2673,12 @@ func (c *containerLXC) Delete() error {
 
 	shared.LogInfo("Deleting container", ctxMap)
 
+	// COMMENT(brauner): Initialize storage interface for the container.
+	err := c.initStorage()
+	if err != nil {
+		return err
+	}
+
 	if c.IsSnapshot() {
 		// Remove the snapshot
 		if err := c.storage.ContainerSnapshotDelete(c); err != nil {
@@ -2682,7 +2703,7 @@ func (c *containerLXC) Delete() error {
 	}
 
 	// Remove the database record
-	if err := dbContainerRemove(c.daemon.db, c.Name()); err != nil {
+	if err = dbContainerRemove(c.daemon.db, c.Name()); err != nil {
 		shared.LogError("Failed deleting container entry", ctxMap)
 		return err
 	}
@@ -2692,7 +2713,7 @@ func (c *containerLXC) Delete() error {
 	// unique.
 	poolID := c.storage.ContainerPoolIDGet()
 	// Remove volume from storage pool.
-	err := dbStoragePoolVolumeDelete(c.daemon.db, c.Name(), storagePoolVolumeTypeContainer, poolID)
+	err = dbStoragePoolVolumeDelete(c.daemon.db, c.Name(), storagePoolVolumeTypeContainer, poolID)
 	if err != nil {
 		return err
 	}
@@ -2749,6 +2770,12 @@ func (c *containerLXC) Rename(newName string) error {
 		}
 	}
 
+	// COMMENT(brauner): Initialize storage interface for the container.
+	err := c.initStorage()
+	if err != nil {
+		return err
+	}
+
 	// Rename the storage entry
 	if c.IsSnapshot() {
 		if err := c.storage.ContainerSnapshotRename(c, newName); err != nil {
@@ -2763,7 +2790,7 @@ func (c *containerLXC) Rename(newName string) error {
 	}
 
 	// Rename the database entry
-	if err := dbContainerRename(c.daemon.db, oldName, newName); err != nil {
+	if err = dbContainerRename(c.daemon.db, oldName, newName); err != nil {
 		shared.LogError("Failed renaming container", ctxMap)
 		return err
 	}
@@ -3112,6 +3139,12 @@ func (c *containerLXC) Update(args containerArgs, userRequested bool) error {
 		return err
 	}
 
+	// COMMENT(brauner): Initialize storage interface for the container.
+	err = c.initStorage()
+	if err != nil {
+		return err
+	}
+
 	// If apparmor changed, re-validate the apparmor profile
 	if shared.StringInSlice("raw.apparmor", changedConfig) || shared.StringInSlice("security.nesting", changedConfig) {
 		err = AAParseProfile(c)
@@ -3981,6 +4014,12 @@ func (c *containerLXC) Migrate(cmd uint, stateDir string, function string, stop
 		shared.LogWarn("unknown migrate call", log.Ctx{"cmd": cmd})
 	}
 
+	// COMMENT(brauner): Initialize storage interface for the container.
+	err = c.initStorage()
+	if err != nil {
+		return err
+	}
+
 	preservesInodes := c.storage.PreservesInodes()
 	/* This feature was only added in 2.0.1, let's not ask for it
 	 * before then or migrations will fail.
@@ -4670,6 +4709,12 @@ func (c *containerLXC) diskState() map[string]api.ContainerStateDisk {
 			continue
 		}
 
+		// COMMENT(brauner): Initialize storage interface for the container.
+		err := c.initStorage()
+		if err != nil {
+			continue
+		}
+
 		usage, err := c.storage.ContainerGetUsage(c)
 		if err != nil {
 			continue
@@ -4892,6 +4937,12 @@ func (c *containerLXC) Storage() storage {
 }
 
 func (c *containerLXC) StorageStart() error {
+	// COMMENT(brauner): Initialize storage interface for the container.
+	err := c.initStorage()
+	if err != nil {
+		return err
+	}
+
 	if c.IsSnapshot() {
 		return c.storage.ContainerSnapshotStart(c)
 	}
@@ -4900,6 +4951,12 @@ func (c *containerLXC) StorageStart() error {
 }
 
 func (c *containerLXC) StorageStop() error {
+	// COMMENT(brauner): Initialize storage interface for the container.
+	err := c.initStorage()
+	if err != nil {
+		return err
+	}
+
 	if c.IsSnapshot() {
 		return c.storage.ContainerSnapshotStop(c)
 	}

From 716124229bd028b4cb08ac5b47062d56152e7655 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Sun, 15 Jan 2017 08:55:19 +0100
Subject: [PATCH 27/63] lxd/storage_volumes*: implement used_by

This commit implements retrieving all users of a given storage volume.
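
A container counts as a user when one of its local disk devices
references the volume, either by bare name or as
"<containers-endpoint>/<name>". Illustrative result (values assumed):

    "used_by": [
        "/1.0/containers/c1"
    ]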

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/storage_volumes.go       | 31 +++++++++++++++++++++++++++----
 lxd/storage_volumes_utils.go | 31 +++++++++++++++++++++++++++++++
 2 files changed, 58 insertions(+), 4 deletions(-)

diff --git a/lxd/storage_volumes.go b/lxd/storage_volumes.go
index 6bc54da..4345ee0 100644
--- a/lxd/storage_volumes.go
+++ b/lxd/storage_volumes.go
@@ -44,14 +44,24 @@ func storagePoolVolumesGet(d *Daemon, r *http.Request) Response {
 	}
 
 	resultString := []string{}
-	if recursion == 0 {
-		for _, volume := range volumes {
-			apiEndpoint, err := storagePoolVolumeTypeNameToApiEndpoint(volume.VolumeType)
+	for i, volume := range volumes {
+		apiEndpoint, err := storagePoolVolumeTypeNameToApiEndpoint(volume.VolumeType)
+		if err != nil {
+			return InternalError(err)
+		}
+
+		if recursion == 0 {
+			resultString = append(resultString, fmt.Sprintf("/%s/storage-pools/%s/volumes/%s/%s", version.APIVersion, poolName, apiEndpoint, volume.VolumeName))
+		} else {
+			volumeUsedBy, err := storagePoolVolumeUsedByGet(d, volume.VolumeName)
 			if err != nil {
 				return InternalError(err)
 			}
-			resultString = append(resultString, fmt.Sprintf("/%s/storage-pools/%s/volumes/%s/%s", version.APIVersion, poolName, apiEndpoint, volume.VolumeName))
+			volumes[i].VolumeUsedBy = volumeUsedBy
 		}
+	}
+
+	if recursion == 0 {
 		return SyncResponse(true, resultString)
 	}
 
@@ -122,6 +132,13 @@ func storagePoolVolumesTypeGet(d *Daemon, r *http.Request) Response {
 			if err != nil {
 				continue
 			}
+
+			volumeUsedBy, err := storagePoolVolumeUsedByGet(d, vol.VolumeName)
+			if err != nil {
+				return InternalError(err)
+			}
+			vol.VolumeUsedBy = volumeUsedBy
+
 			resultMap = append(resultMap, vol)
 		}
 	}
@@ -280,6 +297,12 @@ func storagePoolVolumeTypeGet(d *Daemon, r *http.Request) Response {
 		return SmartError(err)
 	}
 
+	volumeUsedBy, err := storagePoolVolumeUsedByGet(d, volume.VolumeName)
+	if err != nil {
+		return InternalError(err)
+	}
+	volume.VolumeUsedBy = volumeUsedBy
+
 	etag := []interface{}{volume.VolumeName, volume.VolumeType, volume.VolumeUsedBy, volume.VolumeConfig}
 
 	return SyncResponseETag(true, volume, etag)
diff --git a/lxd/storage_volumes_utils.go b/lxd/storage_volumes_utils.go
index 7bc2383..c4efd71 100644
--- a/lxd/storage_volumes_utils.go
+++ b/lxd/storage_volumes_utils.go
@@ -5,6 +5,7 @@ import (
 	"strings"
 
 	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/version"
 )
 
 const (
@@ -186,3 +187,33 @@ func storagePoolVolumeUpdate(d *Daemon, poolName string, volumeName string, volu
 
 	return nil
 }
+
+func storagePoolVolumeUsedByGet(d *Daemon, volumeName string) ([]string, error) {
+	// Look for containers using this storage volume.
+	cts, err := dbContainersList(d.db, cTypeRegular)
+	if err != nil {
+		return []string{}, err
+	}
+
+	volumeUsedBy := []string{}
+	for _, ct := range cts {
+		// COMMENT(brauner): We're not accessing any storage here.
+		c, err := containerLoadByName(d, ct)
+		if err != nil {
+			return []string{}, err
+		}
+
+		for _, dev := range c.LocalDevices() {
+			if dev["type"] != "disk" {
+				continue
+			}
+
+			containerAsVolume := fmt.Sprintf("%s/%s", storagePoolVolumeApiEndpointContainers, volumeName)
+			if (dev["source"] == volumeName) || (dev["source"] == containerAsVolume) {
+				volumeUsedBy = append(volumeUsedBy, fmt.Sprintf("/%s/containers/%s", version.APIVersion, ct))
+			}
+		}
+	}
+
+	return volumeUsedBy, nil
+}

From 38d6d60bd3e2c8647683ef64e65329620540267f Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Sun, 15 Jan 2017 22:48:06 +0100
Subject: [PATCH 28/63] lxd/container_lxc: adapt container renaming

This commit adapts renaming of container storage volumes to the new storage api.
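
A rename now proceeds in this order: storage backend rename (container
or snapshot), the containers table entry, the container's storage
volume row, and, for non-snapshots, every snapshot's table entry and
volume row. Sketch of the added volume step (call as in the hunk
below):

    poolID := c.storage.ContainerPoolIDGet()
    err = dbStoragePoolVolumeRename(c.daemon.db, oldName, newName,
        storagePoolVolumeTypeContainer, poolID)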

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/container_lxc.go | 34 ++++++++++++++++++++++++++++++----
 1 file changed, 30 insertions(+), 4 deletions(-)

diff --git a/lxd/container_lxc.go b/lxd/container_lxc.go
index 0a72304..c064299 100644
--- a/lxd/container_lxc.go
+++ b/lxd/container_lxc.go
@@ -2778,23 +2778,34 @@ func (c *containerLXC) Rename(newName string) error {
 
 	// Rename the storage entry
 	if c.IsSnapshot() {
-		if err := c.storage.ContainerSnapshotRename(c, newName); err != nil {
+		err := c.storage.ContainerSnapshotRename(c, newName)
+		if err != nil {
 			shared.LogError("Failed renaming container", ctxMap)
 			return err
 		}
 	} else {
-		if err := c.storage.ContainerRename(c, newName); err != nil {
+		err := c.storage.ContainerRename(c, newName)
+		if err != nil {
 			shared.LogError("Failed renaming container", ctxMap)
 			return err
 		}
 	}
 
 	// Rename the database entry
-	if err = dbContainerRename(c.daemon.db, oldName, newName); err != nil {
+	err = dbContainerRename(c.daemon.db, oldName, newName)
+	if err != nil {
 		shared.LogError("Failed renaming container", ctxMap)
 		return err
 	}
 
+	// COMMENT(brauner): Rename storage volume for the container.
+	poolID := c.storage.ContainerPoolIDGet()
+	err = dbStoragePoolVolumeRename(c.daemon.db, oldName, newName, storagePoolVolumeTypeContainer, poolID)
+	if err != nil {
+		shared.LogError("Failed renaming storage volume", ctxMap)
+		return err
+	}
+
 	if !c.IsSnapshot() {
 		// Rename all the snapshots
 		results, err := dbContainerGetSnapshots(c.daemon.db, oldName)
@@ -2807,16 +2818,31 @@ func (c *containerLXC) Rename(newName string) error {
 			// Rename the snapshot
 			baseSnapName := filepath.Base(sname)
 			newSnapshotName := newName + shared.SnapshotDelimiter + baseSnapName
-			if err := dbContainerRename(c.daemon.db, sname, newSnapshotName); err != nil {
+			err := dbContainerRename(c.daemon.db, sname, newSnapshotName)
+			if err != nil {
 				shared.LogError("Failed renaming container", ctxMap)
 				return err
 			}
+
+			// COMMENT(brauner): Rename storage volume for the
+			// snapshot.
+			err = dbStoragePoolVolumeRename(c.daemon.db, sname, newSnapshotName, storagePoolVolumeTypeContainer, poolID)
+			if err != nil {
+				shared.LogError("Failed renaming storage volume", ctxMap)
+				return err
+			}
 		}
 	}
 
 	// Set the new name in the struct
 	c.name = newName
 
+	// COMMENT(brauner): Update the storage volume name in the storage
+	// interface.
+	sNew := c.storage.GetStoragePoolVolumeWritable()
+	sNew.VolumeName = newName
+	c.storage.SetStoragePoolVolumeWritable(&sNew)
+
 	// Invalidate the go-lxc cache
 	c.c = nil
 

From 1bf348a93f4d0353e33e2a58afab4050ad3fd8b1 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Wed, 18 Jan 2017 13:55:53 +0100
Subject: [PATCH 29/63] lxd/storage: add StoragePool{M,Um}ount()

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/storage.go | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/lxd/storage.go b/lxd/storage.go
index 45bbbc7..c93c35f 100644
--- a/lxd/storage.go
+++ b/lxd/storage.go
@@ -178,6 +178,8 @@ type storage interface {
 	StoragePoolCheck() error
 	StoragePoolCreate() error
 	StoragePoolDelete() error
+	StoragePoolMount() error
+	StoragePoolUmount() error
 	StoragePoolUpdate(changedConfig []string) error
 	GetStoragePoolWritable() api.StoragePoolPut
 	SetStoragePoolWritable(writable *api.StoragePoolPut)
@@ -547,6 +549,14 @@ func (lw *storageLogWrapper) StoragePoolVolumeDelete() error {
 	return lw.w.StoragePoolVolumeDelete()
 }
 
+func (lw *storageLogWrapper) StoragePoolMount() error {
+	return lw.w.StoragePoolMount()
+}
+
+func (lw *storageLogWrapper) StoragePoolUmount() error {
+	return lw.w.StoragePoolUmount()
+}
+
 func (lw *storageLogWrapper) StoragePoolVolumeMount() error {
 	return lw.w.StoragePoolVolumeMount()
 }

From 66271fc2a5b38526a73efb66a798406b344e4344 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Wed, 18 Jan 2017 14:00:14 +0100
Subject: [PATCH 30/63] lxd/storage_btrfs: add StoragePool{M,Um}ount()

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/storage_btrfs.go | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/lxd/storage_btrfs.go b/lxd/storage_btrfs.go
index d22a281..dfd4a76 100644
--- a/lxd/storage_btrfs.go
+++ b/lxd/storage_btrfs.go
@@ -89,6 +89,14 @@ func (s *storageBtrfs) StoragePoolVolumeDelete() error {
 	return nil
 }
 
+func (s *storageBtrfs) StoragePoolMount() error {
+	return nil
+}
+
+func (s *storageBtrfs) StoragePoolUmount() error {
+	return nil
+}
+
 func (s *storageBtrfs) StoragePoolVolumeMount() error {
 	return nil
 }

From 82c9bcb780cc9254c0b363723a6c98d35009f1e1 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Wed, 18 Jan 2017 14:00:32 +0100
Subject: [PATCH 31/63] lxd/storage_dir: add StoragePool{M,Um}ount()

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/storage_dir.go | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/lxd/storage_dir.go b/lxd/storage_dir.go
index 8b8e361..32d4726 100644
--- a/lxd/storage_dir.go
+++ b/lxd/storage_dir.go
@@ -71,6 +71,14 @@ func (s *storageDir) StoragePoolVolumeDelete() error {
 	return nil
 }
 
+func (s *storageDir) StoragePoolMount() error {
+	return nil
+}
+
+func (s *storageDir) StoragePoolUmount() error {
+	return nil
+}
+
 func (s *storageDir) StoragePoolVolumeMount() error {
 	return nil
 }

From 0869d2d685c64dd4afc12914e78e3689e769a9ee Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Wed, 18 Jan 2017 14:00:44 +0100
Subject: [PATCH 32/63] lxd/storage_lvm: add StoragePool{M,Um}ount()

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/storage_lvm.go | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/lxd/storage_lvm.go b/lxd/storage_lvm.go
index 974514d..056377a 100644
--- a/lxd/storage_lvm.go
+++ b/lxd/storage_lvm.go
@@ -268,6 +268,14 @@ func (s *storageLvm) StoragePoolVolumeDelete() error {
 	return nil
 }
 
+func (s *storageLvm) StoragePoolMount() error {
+	return nil
+}
+
+func (s *storageLvm) StoragePoolUmount() error {
+	return nil
+}
+
 func (s *storageLvm) StoragePoolVolumeMount() error {
 	return nil
 }

From f5c282f853fe88e4be9d7ecd285299ef3a0af199 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Wed, 18 Jan 2017 14:00:55 +0100
Subject: [PATCH 33/63] lxd/storage_zfs: add StoragePool{M,Um}ount()

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/storage_zfs.go | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/lxd/storage_zfs.go b/lxd/storage_zfs.go
index 0d1030a..2fb02cd 100644
--- a/lxd/storage_zfs.go
+++ b/lxd/storage_zfs.go
@@ -165,6 +165,14 @@ func (s *storageZfs) StoragePoolDelete() error {
 	return nil
 }
 
+func (s *storageZfs) StoragePoolMount() error {
+	return nil
+}
+
+func (s *storageZfs) StoragePoolUmount() error {
+	return nil
+}
+
 func (s *storageZfs) StoragePoolVolumeDelete() error {
 	fs := fmt.Sprintf("custom/%s", s.volume.VolumeName)
 	fsMountpoint := fmt.Sprintf("%s@%s", shared.VarPath(fs), s.pool.PoolName)

From ab5a72744d60e7d8dd24e66b266bda52bec0983a Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Wed, 18 Jan 2017 13:52:12 +0100
Subject: [PATCH 34/63] lxd/storage_cgo: add lxd/storage_cgo

This file contains functions and types dealing with storage that are better
implemented in C than in Go. Suitable Go wrappers are provided as needed.
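
From Go the wrapper is used roughly like this (as the btrfs backend
does in a later patch of this series); wrapping the returned fd in an
*os.File both gives us Close() and keeps the descriptor alive for the
duration of the mount:

    fd, loopDev := prepareLoopDev(source)
    if fd < 0 {
        return fmt.Errorf("Could not prepare loop device.")
    }
    file := os.NewFile(uintptr(fd), "")
    defer file.Close()

    err := tryMount(loopDev, target, "btrfs", 0, "")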

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/storage_cgo.go | 169 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 169 insertions(+)
 create mode 100644 lxd/storage_cgo.go

diff --git a/lxd/storage_cgo.go b/lxd/storage_cgo.go
new file mode 100644
index 0000000..b95838f
--- /dev/null
+++ b/lxd/storage_cgo.go
@@ -0,0 +1,169 @@
+// +build linux
+// +build cgo
+
+package main
+
+/*
+#define _GNU_SOURCE
+#include <dirent.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/loop.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/mount.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#ifndef LO_FLAGS_AUTOCLEAR
+#define LO_FLAGS_AUTOCLEAR 4
+#endif
+
+static int get_unused_loop_dev_legacy(char *loop_name)
+{
+	struct dirent *dp;
+	struct loop_info64 lo64;
+	DIR *dir;
+	int dfd = -1, fd = -1, ret = -1;
+
+	dir = opendir("/dev");
+	if (!dir)
+		return -1;
+
+	while ((dp = readdir(dir))) {
+		if (!dp)
+			break;
+
+		if (strncmp(dp->d_name, "loop", 4) != 0)
+			continue;
+
+		dfd = dirfd(dir);
+		if (dfd < 0)
+			continue;
+
+		fd = openat(dfd, dp->d_name, O_RDWR);
+		if (fd < 0)
+			continue;
+
+		/* A loop device is free iff LOOP_GET_STATUS64 fails with
+		 * ENXIO; skip devices that are bound or that fail for any
+		 * other reason.
+		 */
+		ret = ioctl(fd, LOOP_GET_STATUS64, &lo64);
+		if (ret == 0 || errno != ENXIO) {
+			close(fd);
+			fd = -1;
+			continue;
+		}
+
+		ret = snprintf(loop_name, LO_NAME_SIZE, "/dev/%s", dp->d_name);
+		if (ret < 0 || ret >= LO_NAME_SIZE) {
+			close(fd);
+			fd = -1;
+			continue;
+		}
+
+		break;
+	}
+
+	closedir(dir);
+
+	if (fd < 0)
+		return -1;
+
+	return fd;
+}
+
+static int get_unused_loop_dev(char *name_loop)
+{
+	int loop_nr, ret;
+	int fd_ctl = -1, fd_tmp = -1;
+
+	fd_ctl = open("/dev/loop-control", O_RDWR);
+	if (fd_ctl < 0)
+		return -ENODEV;
+
+	loop_nr = ioctl(fd_ctl, LOOP_CTL_GET_FREE);
+	if (loop_nr < 0)
+		goto on_error;
+
+	ret = snprintf(name_loop, LO_NAME_SIZE, "/dev/loop%d", loop_nr);
+	if (ret < 0 || ret >= LO_NAME_SIZE)
+		goto on_error;
+
+	fd_tmp = open(name_loop, O_RDWR);
+	if (fd_tmp < 0)
+		goto on_error;
+
+on_error:
+	close(fd_ctl);
+	return fd_tmp;
+}
+
+int prepare_loop_dev(const char *source, char *loop_dev)
+{
+	int ret;
+	struct loop_info64 lo64;
+	int fd_img = -1, fret = -1, fd_loop = -1;
+
+	fd_loop = get_unused_loop_dev(loop_dev);
+	if (fd_loop < 0) {
+		if (fd_loop == -ENODEV)
+			fd_loop = get_unused_loop_dev_legacy(loop_dev);
+		else
+			goto on_error;
+	}
+
+	fd_img = open(source, O_RDWR);
+	if (fd_img < 0)
+		goto on_error;
+
+	ret = ioctl(fd_loop, LOOP_SET_FD, fd_img);
+	if (ret < 0)
+		goto on_error;
+
+	memset(&lo64, 0, sizeof(lo64));
+	lo64.lo_flags = LO_FLAGS_AUTOCLEAR;
+
+	ret = ioctl(fd_loop, LOOP_SET_STATUS64, &lo64);
+	if (ret < 0)
+		goto on_error;
+
+	fret = 0;
+
+on_error:
+	if (fd_img >= 0)
+		close(fd_img);
+
+	if (fret < 0 && fd_loop >= 0) {
+		close(fd_loop);
+		fd_loop = -1;
+	}
+
+	return fd_loop;
+}
+*/
+import "C"
+
+// COMMENT(brauner):
+// prepareLoopDev() detects and sets up a loop device for source. It returns an
+// open file descriptor to the free loop device and the path of the free loop
+// device. It's the caller's responsibility to close the open file descriptor.
+func prepareLoopDev(source string) (int, string) {
+	cLoopDev := C.malloc(C.size_t(C.LO_NAME_SIZE))
+	if cLoopDev == nil {
+		return -1, ""
+	}
+	defer C.free(cLoopDev)
+
+	cSource := C.CString(source)
+	ret := int(C.prepare_loop_dev(cSource, (*C.char)(cLoopDev)))
+	if ret < 0 {
+		return -1, ""
+	}
+
+	goLoopDev := C.GoString((*C.char)(cLoopDev))
+	return ret, goLoopDev
+}

From ae4e1674030f43231b5e43798f653e75c7c5eac7 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Wed, 18 Jan 2017 14:22:06 +0100
Subject: [PATCH 35/63] lxd/storage_btrfs: implement StoragePool{M,Um}ount

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/storage_btrfs.go | 44 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 44 insertions(+)

diff --git a/lxd/storage_btrfs.go b/lxd/storage_btrfs.go
index dfd4a76..76f9577 100644
--- a/lxd/storage_btrfs.go
+++ b/lxd/storage_btrfs.go
@@ -90,10 +90,54 @@ func (s *storageBtrfs) StoragePoolVolumeDelete() error {
 }
 
 func (s *storageBtrfs) StoragePoolMount() error {
+	source := s.pool.PoolConfig["source"]
+	if source == "" {
+		return fmt.Errorf("No \"source\" property found for the storage pool.")
+	}
+
+	target := filepath.Join(shared.VarPath("storage-pools"), s.pool.PoolName)
+
+	if !shared.PathExists(target) {
+		err := os.MkdirAll(target, 0700)
+		if err != nil {
+			return err
+		}
+	}
+
+	if !shared.IsMountPoint(target) {
+		mountOptions := ""
+		// COMMENT(brauner): This is a loop mount.
+		if !shared.IsBlockdevPath(source) {
+			fd, loopDev := prepareLoopDev(source)
+			if fd < 0 {
+				return fmt.Errorf("Could not prepare loop device.")
+			}
+			file := os.NewFile(uintptr(fd), "")
+			defer file.Close()
+
+			err := tryMount(loopDev, target, "btrfs", 0, mountOptions)
+			if err != nil {
+				return err
+			}
+		}
+
+		// TODO(brauner): Implement mounting when btrfs source is block
+		// device.
+	}
+
 	return nil
 }
 
 func (s *storageBtrfs) StoragePoolUmount() error {
+	target := filepath.Join(shared.VarPath("storage-pools"), s.pool.PoolName)
+
+	if shared.IsMountPoint(target) {
+		err := tryUnmount(target, 0)
+		if err != nil {
+			return err
+		}
+	}
+
 	return nil
 }
 

From 773054048b0292bd226e929562f7ecfd267152fc Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Wed, 18 Jan 2017 14:24:10 +0100
Subject: [PATCH 36/63] lxd/storage_btrfs: implement StoragePoolCreate()

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/storage_btrfs.go | 51 +++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 51 insertions(+)

diff --git a/lxd/storage_btrfs.go b/lxd/storage_btrfs.go
index 76f9577..1288529 100644
--- a/lxd/storage_btrfs.go
+++ b/lxd/storage_btrfs.go
@@ -74,6 +74,57 @@ func (s *storageBtrfs) StoragePoolCheck() error {
 }
 
 func (s *storageBtrfs) StoragePoolCreate() error {
+	source := s.pool.PoolConfig["source"]
+	if source == "" {
+		return fmt.Errorf("No \"source\" property found for the storage pool.")
+	}
+
+	// COMMENT(brauner): Create the mountpoint for the storage pool.
+	target := filepath.Join(shared.VarPath("storage-pools"), s.pool.PoolName)
+	err := os.MkdirAll(target, 0700)
+	if err != nil {
+		return err
+	}
+
+	if !shared.IsBlockdevPath(source) {
+		source = source + ".img"
+		s.pool.PoolConfig["source"] = source
+
+		// COMMENT(brauner): This is likely a loop file.
+		f, err := os.Create(source)
+		if err != nil {
+			return fmt.Errorf("Failed to create %s: %s", source, err)
+		}
+
+		err = f.Chmod(0600)
+		if err != nil {
+			return fmt.Errorf("Failed to chmod %s: %s", source, err)
+		}
+
+		size, err := strconv.ParseInt(s.pool.PoolConfig["size"], 10, 64)
+		if err != nil {
+			return err
+		}
+
+		err = f.Truncate(size)
+		if err != nil {
+			return fmt.Errorf("Failed to create sparse file %s: %s", source, err)
+		}
+
+		err = f.Close()
+		if err != nil {
+			return fmt.Errorf("Failed to close %s: %s", source, err)
+		}
+	}
+
+	// COMMENT(brauner): Create a btrfs filesystem.
+	output, err := exec.Command(
+		"mkfs.btrfs",
+		"-L", s.pool.PoolName, source).CombinedOutput()
+	if err != nil {
+		return fmt.Errorf("Failed to create the BTRFS pool: %s", output)
+	}
+
 	return nil
 }
 

From 9e6c0f06074fcb39a15c18ed7082d84dc2aeb4e1 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Wed, 18 Jan 2017 14:24:48 +0100
Subject: [PATCH 37/63] lxd/storage_btrfs: implement StoragePoolDelete()

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/storage_btrfs.go | 29 +++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)

diff --git a/lxd/storage_btrfs.go b/lxd/storage_btrfs.go
index 1288529..c7b8494 100644
--- a/lxd/storage_btrfs.go
+++ b/lxd/storage_btrfs.go
@@ -133,6 +133,35 @@ func (s *storageBtrfs) StoragePoolVolumeCreate() error {
 }
 
 func (s *storageBtrfs) StoragePoolDelete() error {
+	source := s.pool.PoolConfig["source"]
+	if source == "" {
+		return fmt.Errorf("No \"source\" property found for the storage pool.")
+	}
+
+	target := filepath.Join(shared.VarPath("storage-pools"), s.pool.PoolName)
+
+	// COMMENT(brauner): If the storage pool is currently mounted, unmount
+	// it.
+	if shared.IsMountPoint(target) {
+		err := s.StoragePoolUmount()
+		if err != nil {
+			return err
+		}
+	}
+
+	if shared.IsBlockdevPath(source) {
+		// TODO(brauner): Remove block devices.
+	} else {
+		// COMMENT(brauner): This is a loop file --> simply remove it.
+		err := os.Remove(source)
+		if err != nil {
+			return err
+		}
+	}
+
+	// COMMENT(brauner): Remove the mountpoint for the storage pool.
+	os.RemoveAll(target)
+
 	return nil
 }
 

From 9c144ce4983c083f8dbc6eae145bf368ab3e5cf4 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Thu, 19 Jan 2017 13:17:43 +0100
Subject: [PATCH 38/63] lxd/storage_btrfs: implement ImageCreate()
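
The on-disk layout this introduces, as documented in the comments
below:

    ${LXD_DIR}/storage-pools/<pool>/images/<fingerprint>_tmp  rw subvolume, deleted at the end
    ${LXD_DIR}/storage-pools/<pool>/images/<fingerprint>      ro snapshot kept as the image
    ${LXD_DIR}/images/<fingerprint>@<pool>                    mountpoint used while unpacking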

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/storage_btrfs.go | 106 +++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 102 insertions(+), 4 deletions(-)

diff --git a/lxd/storage_btrfs.go b/lxd/storage_btrfs.go
index c7b8494..72cf526 100644
--- a/lxd/storage_btrfs.go
+++ b/lxd/storage_btrfs.go
@@ -618,18 +618,116 @@ func (s *storageBtrfs) ContainerSnapshotCreateEmpty(snapshotContainer container)
 }
 
 func (s *storageBtrfs) ImageCreate(fingerprint string) error {
+	// COMMENT(brauner): Create the subvolume.
+	source := s.pool.PoolConfig["source"]
+	if source == "" {
+		return fmt.Errorf("No \"source\" property found for the storage pool.")
+	}
+
+	poolMntPoint := filepath.Join(shared.VarPath("storage-pools"), s.pool.PoolName)
+
+	// COMMENT(brauner): If the storage pool is currently unmounted, mount
+	// it. The btrfs storage pool will be mounted at
+	// ${LXD_DIR}/storage-pools/<pool_name>.
+	if !shared.IsMountPoint(poolMntPoint) {
+		err := s.StoragePoolMount()
+		if err != nil {
+			return err
+		}
+	}
+	// COMMENT(brauner): Unmount the pool again.
+	defer s.StoragePoolUmount()
+
+	// COMMENT(brauner): We can only create the btrfs subvolume under the
+	// mounted storage pool. The on-disk layout for images on a btrfs
+	// storage pool will thus be
+	// ${LXD_DIR}/storage-pools/<pool_name>/images/. The btrfs tool will
+	// complain if the intermediate path does not exist, so create it if it
+	// doesn't already.
+	imageSubvolPath := filepath.Join(poolMntPoint, "images")
+	if !shared.PathExists(imageSubvolPath) {
+		err := os.MkdirAll(imageSubvolPath, 0700)
+		if err != nil {
+			return err
+		}
+	}
+
+	// COMMENT(brauner): Create a temporary rw btrfs subvolume. From this rw
+	// subvolume we'll create a ro snapshot below. The path with which we do
+	// this is
+	// ${LXD_DIR}/storage-pools/<pool_name>/images/<fingerprint_tmp>.
+	subvolTmpPath := filepath.Join(imageSubvolPath, fmt.Sprintf("%s_tmp", fingerprint))
+	if err := s.btrfsPoolVolumeCreate(subvolTmpPath); err != nil {
+		return err
+	}
+	// COMMENT(brauner): Delete volume on error.
+	undo := true
+	defer func() {
+		if undo {
+			s.btrfsPoolVolumeDelete(subvolTmpPath)
+		}
+	}()
+
+	// COMMENT(brauner): Now create the mountpoint for the image:
+	// ${LXD_DIR}/images/<fingerprint>@<pool_name>.
+	imageMntPoint := filepath.Join(shared.VarPath("images"), fmt.Sprintf("%s@%s", fingerprint, s.pool.PoolName))
+	err := os.MkdirAll(imageMntPoint, 0700)
+	if err != nil {
+		return err
+	}
+	// COMMENT(brauner): Delete mountpoint on error, otherwise later image
+	// creations will fail (naggy btrfs).
+	defer func() {
+		if undo {
+			os.Remove(imageMntPoint)
+		}
+	}()
+
+	// COMMENT(brauner): Mount the storage volume for the image on its
+	// mountpoint.
+	// mount("/dev/loop<n>", "/path/to/target", "btrfs", 0, "subvol=subvol/name")
+	fd, loopDev := prepareLoopDev(source)
+	if fd < 0 {
+		return fmt.Errorf("Could not prepare loop device.")
+	}
+	file := os.NewFile(uintptr(fd), "")
+	defer file.Close()
+
+	subvol := fmt.Sprintf("subvol=images/%s_tmp", fingerprint)
+	err = tryMount(loopDev, imageMntPoint, "btrfs", 0, subvol)
+	if err != nil {
+		return err
+	}
+	// COMMENT(brauner): Unmount after we are done with the image.
+	defer tryUnmount(imageMntPoint, 0)
+
+	// COMMENT(brauner): Unpack the image in imageMntPoint.
 	imagePath := shared.VarPath("images", fingerprint)
-	subvol := fmt.Sprintf("%s.btrfs", imagePath)
+	if err := unpackImage(s.d, imagePath, imageMntPoint, storageTypeBtrfs); err != nil {
+		return err
+	}
 
-	if err := s.btrfsPoolVolumeCreate(subvol); err != nil {
+	// COMMENT(brauner): Now create a read-only snapshot of the subvolume.
+	// The path with which we do this is
+	// ${LXD_DIR}/storage-pools/<pool_name>/images/<fingerprint@readonly>.
+	subvolSnapshotPath := filepath.Join(imageSubvolPath, fingerprint)
+	if err := s.btrfsPoolVolumeSnapshot(subvolTmpPath, subvolSnapshotPath, true); err != nil {
 		return err
 	}
 
-	if err := unpackImage(s.d, imagePath, subvol, storageTypeBtrfs); err != nil {
-		s.btrfsPoolVolumeDelete(subvol)
+	defer func() {
+		if undo {
+			s.btrfsPoolVolumeDelete(subvolSnapshotPath)
+		}
+	}()
+
+	err = s.btrfsPoolVolumeDelete(subvolTmpPath)
+	if err != nil {
 		return err
 	}
 
+	undo = false
+
 	return nil
 }
 

From 829e7958960be501feb2f212b8b9bacf44b51731 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Thu, 19 Jan 2017 13:18:21 +0100
Subject: [PATCH 39/63] lxd/storage_btrfs: implement ImageDelete()

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/storage_btrfs.go | 36 ++++++++++++++++++++++++++++++++----
 1 file changed, 32 insertions(+), 4 deletions(-)

diff --git a/lxd/storage_btrfs.go b/lxd/storage_btrfs.go
index 72cf526..7700d78 100644
--- a/lxd/storage_btrfs.go
+++ b/lxd/storage_btrfs.go
@@ -732,15 +732,43 @@ func (s *storageBtrfs) ImageCreate(fingerprint string) error {
 }
 
 func (s *storageBtrfs) ImageDelete(fingerprint string) error {
-	imagePath := shared.VarPath("images", fingerprint)
-	subvol := fmt.Sprintf("%s.btrfs", imagePath)
+	poolMntPoint := filepath.Join(shared.VarPath("storage-pools"), s.pool.PoolName)
 
-	if s.isBtrfsPoolVolume(subvol) {
-		if err := s.btrfsPoolVolumesDelete(subvol); err != nil {
+	// COMMENT(brauner): If the storage pool is currently unmounted, mount
+	// it. The btrfs storage pool will be mounted at
+	// ${LXD_DIR}/storage-pools/<pool_name>.
+	if !shared.IsMountPoint(poolMntPoint) {
+		err := s.StoragePoolMount()
+		if err != nil {
+			return err
+		}
+	}
+	// COMMENT(brauner): Unmount the pool again.
+	defer s.StoragePoolUmount()
+
+	// COMMENT(brauner): Now check whether the image is mounted at:
+	// ${LXD_DIR}/images/<fingerprint>@<pool_name>. If so, unmount it.
+	imageMntPoint := filepath.Join(shared.VarPath("images"), fmt.Sprintf("%s@%s", fingerprint, s.pool.PoolName))
+	if shared.IsMountPoint(imageMntPoint) {
+		err := tryUnmount(imageMntPoint, 0)
+		if err != nil {
 			return err
 		}
 	}
 
+	// COMMENT(brauner): Delete the btrfs subvolume. The path with which we
+	// do this is ${LXD_DIR}/storage-pools/<pool_name>/images/<fingerprint@readonly>.
+	imageSubvolPath := filepath.Join(poolMntPoint, "images")
+	subvolPath := filepath.Join(imageSubvolPath, fingerprint)
+	err := s.btrfsPoolVolumeDelete(subvolPath)
+	if err != nil {
+		return err
+	}
+
+	// COMMENT(brauner): Now delete the mountpoint for the image:
+	// ${LXD_DIR}/images/<fingerprint>@<pool_name>.
+	os.RemoveAll(imageMntPoint)
+
 	return nil
 }
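
ImageDelete() leans on shared.IsMountPoint() to decide whether the pool and
the image mountpoint need handling first. For reference, the usual way such
a check is implemented is to compare the device ID of the path with that of
its parent; a minimal sketch, not the exact LXD code:

package sketch

import (
	"os"
	"syscall"
)

// isMountPoint reports whether path is a mount boundary by comparing its
// device ID with its parent's. Minimal sketch; ignores edge cases like "/".
func isMountPoint(path string) bool {
	stat, err := os.Stat(path)
	if err != nil {
		return false
	}

	parent, err := os.Lstat(path + "/..")
	if err != nil {
		return false
	}

	// A different device ID than the parent means something is mounted here.
	return stat.Sys().(*syscall.Stat_t).Dev != parent.Sys().(*syscall.Stat_t).Dev
}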
 

From 384d2634c055acbb12de3d5b2336be7b6a96dc3c Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Thu, 19 Jan 2017 13:11:09 +0100
Subject: [PATCH 40/63] lxd/storage_zfs: some comments

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/storage_zfs.go | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/lxd/storage_zfs.go b/lxd/storage_zfs.go
index 2fb02cd..6e45ff5 100644
--- a/lxd/storage_zfs.go
+++ b/lxd/storage_zfs.go
@@ -829,6 +829,14 @@ func (s *storageZfs) ContainerSnapshotCreateEmpty(snapshotContainer container) e
 	return nil
 }
 
+// COMMENT(brauner):
+// - create temporary directory ${LXD_DIR}/images/lxd_images_
+// - create new zfs volume images/<fingerprint>
+// - mount the zfs volume on ${LXD_DIR}/images/lxd_images_
+// - unpack the downloaded image in ${LXD_DIR}/images/lxd_images_
+// - mark new zfs volume images/<fingerprint> readonly
+// - remove mountpoint property from zfs volume images/<fingerprint>
+// - create read-write snapshot from zfs volume images/<fingerprint>
 func (s *storageZfs) ImageCreate(fingerprint string) error {
 	// Create temporary mountpoint directory.
 	tmpImageDir, err := ioutil.TempDir(shared.VarPath("images"), "lxd_images_")
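
Spelled out as zfs invocations, the steps listed in the comment come down to
something like the following sketch. The pool name, the /var/lib/lxd path,
and the "@readonly" snapshot name are illustrative assumptions, not the
exact implementation:

package sketch

import (
	"fmt"
	"io/ioutil"
	"os/exec"
)

// zfs shells out to the zfs tool; stand-in for the LXD zfsPoolVolume* helpers.
func zfs(args ...string) error {
	out, err := exec.Command("zfs", args...).CombinedOutput()
	if err != nil {
		return fmt.Errorf("zfs %v: %s", args, out)
	}
	return nil
}

// imageCreate follows the steps from the comment above.
func imageCreate(pool, fingerprint string, unpack func(dir string) error) error {
	fs := fmt.Sprintf("%s/images/%s", pool, fingerprint)

	tmpDir, err := ioutil.TempDir("/var/lib/lxd/images", "lxd_images_")
	if err != nil {
		return err
	}

	// Create the volume mounted on the temporary directory and unpack the
	// downloaded image into it.
	if err := zfs("create", "-o", "mountpoint="+tmpDir, fs); err != nil {
		return err
	}
	if err := unpack(tmpDir); err != nil {
		return err
	}

	// Mark the volume read-only and drop its mountpoint property.
	if err := zfs("set", "readonly=on", fs); err != nil {
		return err
	}
	if err := zfs("set", "mountpoint=none", fs); err != nil {
		return err
	}

	// Snapshot it so containers can later be cloned from it.
	return zfs("snapshot", fs+"@readonly")
}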

From 1074dbc1ff4b4715da7003796e8605dffa09aaef Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Sun, 22 Jan 2017 18:57:45 +0100
Subject: [PATCH 41/63] lxd/storage*: adapt mount functions

Give them an additional boolean return value that indicates whether they
actually mounted or unmounted.
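
The extra boolean lets a caller distinguish "already mounted" from "I just
mounted it", so only the caller that actually performed the mount undoes it.
A sketch of the intended caller pattern (the withPool wrapper is
illustrative, not part of the patch, but the idiom itself is used verbatim
later in this series):

package sketch

type pool interface {
	StoragePoolMount() (error, bool)
	StoragePoolUmount() (error, bool)
}

// withPool mounts the pool if needed, runs fn, and unmounts again only if
// this call was the one that actually performed the mount.
func withPool(s pool, fn func() error) error {
	err, ourMount := s.StoragePoolMount()
	if err != nil {
		return err
	}
	if ourMount {
		defer s.StoragePoolUmount()
	}
	return fn()
}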

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/container_lxc.go | 14 +++++++++++---
 lxd/storage.go       | 50 +++++++++++++++++++++++++++++---------------------
 lxd/storage_btrfs.go | 48 ++++++++++++++++++++++++++++--------------------
 lxd/storage_dir.go   | 32 ++++++++++++++++++++------------
 lxd/storage_lvm.go   | 42 +++++++++++++++++++++++++-----------------
 lxd/storage_zfs.go   | 45 ++++++++++++++++++++++++++++-----------------
 6 files changed, 141 insertions(+), 90 deletions(-)

diff --git a/lxd/container_lxc.go b/lxd/container_lxc.go
index c064299..d30b333 100644
--- a/lxd/container_lxc.go
+++ b/lxd/container_lxc.go
@@ -4973,7 +4973,11 @@ func (c *containerLXC) StorageStart() error {
 		return c.storage.ContainerSnapshotStart(c)
 	}
 
-	return c.storage.ContainerStart(c.Name(), c.Path())
+	err, _ = c.storage.ContainerMount(c.Name(), c.Path())
+	if err != nil {
+		return err
+	}
+	return nil
 }
 
 func (c *containerLXC) StorageStop() error {
@@ -4987,7 +4991,11 @@ func (c *containerLXC) StorageStop() error {
 		return c.storage.ContainerSnapshotStop(c)
 	}
 
-	return c.storage.ContainerStop(c.Name(), c.Path())
+	err, _ = c.storage.ContainerUmount(c.Name(), c.Path())
+	if err != nil {
+		return err
+	}
+	return nil
 }
 
 // Mount handling
@@ -5859,7 +5867,7 @@ func (c *containerLXC) createDiskDevice(name string, m types.Device) (string, er
 		if err != nil && !isOptional {
 			return "", fmt.Errorf("Failed to initialize storage volume \"%s\" on storage pool \"%s\": %s.", volumeName, m["pool"], err)
 		} else if err == nil {
-			err := s.StoragePoolVolumeMount()
+			err, _ := s.StoragePoolVolumeMount()
 			if err != nil {
 				shared.LogWarnf("Could not mount storage volume \"%s\" on storage pool \"%s\": %s.", volumeName, m["pool"], err)
 			}
diff --git a/lxd/storage.go b/lxd/storage.go
index c93c35f..17898f9 100644
--- a/lxd/storage.go
+++ b/lxd/storage.go
@@ -178,8 +178,8 @@ type storage interface {
 	StoragePoolCheck() error
 	StoragePoolCreate() error
 	StoragePoolDelete() error
-	StoragePoolMount() error
-	StoragePoolUmount() error
+	StoragePoolMount() (error, bool)
+	StoragePoolUmount() (error, bool)
 	StoragePoolUpdate(changedConfig []string) error
 	GetStoragePoolWritable() api.StoragePoolPut
 	SetStoragePoolWritable(writable *api.StoragePoolPut)
@@ -187,8 +187,8 @@ type storage interface {
 	// Functions dealing with storage volumes.
 	StoragePoolVolumeCreate() error
 	StoragePoolVolumeDelete() error
-	StoragePoolVolumeMount() error
-	StoragePoolVolumeUmount() error
+	StoragePoolVolumeMount() (error, bool)
+	StoragePoolVolumeUmount() (error, bool)
 	StoragePoolVolumeUpdate(changedConfig []string) error
 	GetStoragePoolVolumeWritable() api.StorageVolumePut
 	SetStoragePoolVolumeWritable(writable *api.StorageVolumePut)
@@ -202,8 +202,8 @@ type storage interface {
 	ContainerCanRestore(container container, sourceContainer container) error
 	ContainerDelete(container container) error
 	ContainerCopy(container container, sourceContainer container) error
-	ContainerStart(name string, path string) error
-	ContainerStop(name string, path string) error
+	ContainerMount(name string, path string) (error, bool)
+	ContainerUmount(name string, path string) (error, bool)
 	ContainerRename(container container, newName string) error
 	ContainerRestore(container container, sourceContainer container) error
 	ContainerSetQuota(container container, size int64) error
@@ -211,8 +211,7 @@ type storage interface {
 	ContainerPoolGet() string
 	ContainerPoolIDGet() int64
 
-	ContainerSnapshotCreate(
-		snapshotContainer container, sourceContainer container) error
+	ContainerSnapshotCreate(snapshotContainer container, sourceContainer container) error
 	ContainerSnapshotDelete(snapshotContainer container) error
 	ContainerSnapshotRename(snapshotContainer container, newName string) error
 	ContainerSnapshotStart(container container) error
@@ -223,6 +222,8 @@ type storage interface {
 
 	ImageCreate(fingerprint string) error
 	ImageDelete(fingerprint string) error
+	ImageMount(fingerprint string) (error, bool)
+	ImageUmount(fingerprint string) (error, bool)
 
 	MigrationType() MigrationFSType
 	/* does this storage backend preserve inodes when it is moved across
@@ -549,19 +550,19 @@ func (lw *storageLogWrapper) StoragePoolVolumeDelete() error {
 	return lw.w.StoragePoolVolumeDelete()
 }
 
-func (lw *storageLogWrapper) StoragePoolMount() error {
+func (lw *storageLogWrapper) StoragePoolMount() (error, bool) {
 	return lw.w.StoragePoolMount()
 }
 
-func (lw *storageLogWrapper) StoragePoolUmount() error {
+func (lw *storageLogWrapper) StoragePoolUmount() (error, bool) {
 	return lw.w.StoragePoolUmount()
 }
 
-func (lw *storageLogWrapper) StoragePoolVolumeMount() error {
+func (lw *storageLogWrapper) StoragePoolVolumeMount() (error, bool) {
 	return lw.w.StoragePoolVolumeMount()
 }
 
-func (lw *storageLogWrapper) StoragePoolVolumeUmount() error {
+func (lw *storageLogWrapper) StoragePoolVolumeUmount() (error, bool) {
 	return lw.w.StoragePoolVolumeUmount()
 }
 
@@ -643,14 +644,14 @@ func (lw *storageLogWrapper) ContainerCopy(
 	return lw.w.ContainerCopy(container, sourceContainer)
 }
 
-func (lw *storageLogWrapper) ContainerStart(name string, path string) error {
-	lw.log.Debug("ContainerStart", log.Ctx{"container": name})
-	return lw.w.ContainerStart(name, path)
+func (lw *storageLogWrapper) ContainerMount(name string, path string) (error, bool) {
+	lw.log.Debug("ContainerMount", log.Ctx{"container": name})
+	return lw.w.ContainerMount(name, path)
 }
 
-func (lw *storageLogWrapper) ContainerStop(name string, path string) error {
-	lw.log.Debug("ContainerStop", log.Ctx{"container": name})
-	return lw.w.ContainerStop(name, path)
+func (lw *storageLogWrapper) ContainerUmount(name string, path string) (error, bool) {
+	lw.log.Debug("ContainerUmount", log.Ctx{"container": name})
+	return lw.w.ContainerUmount(name, path)
 }
 
 func (lw *storageLogWrapper) ContainerRename(
@@ -744,16 +745,23 @@ func (lw *storageLogWrapper) ContainerSnapshotStop(container container) error {
 }
 
 func (lw *storageLogWrapper) ImageCreate(fingerprint string) error {
-	lw.log.Debug(
-		"ImageCreate",
-		log.Ctx{"fingerprint": fingerprint})
+	lw.log.Debug("ImageCreate", log.Ctx{"fingerprint": fingerprint})
 	return lw.w.ImageCreate(fingerprint)
 }
 
 func (lw *storageLogWrapper) ImageDelete(fingerprint string) error {
 	lw.log.Debug("ImageDelete", log.Ctx{"fingerprint": fingerprint})
 	return lw.w.ImageDelete(fingerprint)
+}
+
+func (lw *storageLogWrapper) ImageMount(fingerprint string) (error, bool) {
+	lw.log.Debug("ImageMount", log.Ctx{"fingerprint": fingerprint})
+	return lw.w.ImageMount(fingerprint)
+}
 
+func (lw *storageLogWrapper) ImageUmount(fingerprint string) (error, bool) {
+	lw.log.Debug("ImageUmount", log.Ctx{"fingerprint": fingerprint})
+	return lw.w.ImageUmount(fingerprint)
 }
 
 func (lw *storageLogWrapper) MigrationType() MigrationFSType {
diff --git a/lxd/storage_btrfs.go b/lxd/storage_btrfs.go
index 7700d78..b6db673 100644
--- a/lxd/storage_btrfs.go
+++ b/lxd/storage_btrfs.go
@@ -143,7 +143,7 @@ func (s *storageBtrfs) StoragePoolDelete() error {
 	// COMMENT(brauner): If the storage pool is currently mounted, unmount
 	// it.
 	if shared.IsMountPoint(target) {
-		err := s.StoragePoolUmount()
+		err, _ := s.StoragePoolUmount()
 		if err != nil {
 			return err
 		}
@@ -169,10 +169,10 @@ func (s *storageBtrfs) StoragePoolVolumeDelete() error {
 	return nil
 }
 
-func (s *storageBtrfs) StoragePoolMount() error {
+func (s *storageBtrfs) StoragePoolMount() (error, bool) {
 	source := s.pool.PoolConfig["source"]
 	if source == "" {
-		return fmt.Errorf("No \"source\" property found for the storage pool.")
+		return fmt.Errorf("No \"source\" property found for the storage pool."), false
 	}
 
 	target := filepath.Join(shared.VarPath("storage-pools"), s.pool.PoolName)
@@ -180,7 +180,7 @@ func (s *storageBtrfs) StoragePoolMount() error {
 	if !shared.PathExists(target) {
 		err := os.MkdirAll(target, 0700)
 		if err != nil {
-			return err
+			return err, false
 		}
 	}
 
@@ -190,14 +190,14 @@ func (s *storageBtrfs) StoragePoolMount() error {
 		if !shared.IsBlockdevPath(source) {
 			fd, loopDev := prepareLoopDev(source)
 			if fd < 0 {
-				return fmt.Errorf("Could not prepare loop device.")
+				return fmt.Errorf("Could not prepare loop device."), false
 			}
 			file := os.NewFile(uintptr(fd), "")
 			defer file.Close()
 
 			err := tryMount(loopDev, target, "btrfs", 0, mountOptions)
 			if err != nil {
-				return err
+				return err, false
 			}
 		}
 
@@ -205,28 +205,28 @@ func (s *storageBtrfs) StoragePoolMount() error {
 		// device.
 	}
 
-	return nil
+	return nil, true
 }
 
-func (s *storageBtrfs) StoragePoolUmount() error {
+func (s *storageBtrfs) StoragePoolUmount() (error, bool) {
 	target := filepath.Join(shared.VarPath("storage-pools"), s.pool.PoolName)
 
 	if shared.IsMountPoint(target) {
 		err := tryUnmount(target, 0)
 		if err != nil {
-			return err
+			return err, false
 		}
 	}
 
-	return nil
+	return nil, true
 }
 
-func (s *storageBtrfs) StoragePoolVolumeMount() error {
-	return nil
+func (s *storageBtrfs) StoragePoolVolumeMount() (error, bool) {
+	return nil, true
 }
 
-func (s *storageBtrfs) StoragePoolVolumeUmount() error {
-	return nil
+func (s *storageBtrfs) StoragePoolVolumeUmount() (error, bool) {
+	return nil, true
 }
 
 func (s *storageBtrfs) GetStoragePoolWritable() api.StoragePoolPut {
@@ -382,12 +382,12 @@ func (s *storageBtrfs) ContainerCopy(container container, sourceContainer contai
 	return container.TemplateApply("copy")
 }
 
-func (s *storageBtrfs) ContainerStart(name string, path string) error {
-	return nil
+func (s *storageBtrfs) ContainerMount(name string, path string) (error, bool) {
+	return nil, true
 }
 
-func (s *storageBtrfs) ContainerStop(name string, path string) error {
-	return nil
+func (s *storageBtrfs) ContainerUmount(name string, path string) (error, bool) {
+	return nil, true
 }
 
 func (s *storageBtrfs) ContainerRename(container container, newName string) error {
@@ -630,7 +630,7 @@ func (s *storageBtrfs) ImageCreate(fingerprint string) error {
 	// it. The btrfs storage pool will be mounted at
 	// ${LXD_DIR}/storage-pools/<pool_name>.
 	if !shared.IsMountPoint(poolMntPoint) {
-		err := s.StoragePoolMount()
+		err, _ := s.StoragePoolMount()
 		if err != nil {
 			return err
 		}
@@ -738,7 +738,7 @@ func (s *storageBtrfs) ImageDelete(fingerprint string) error {
 	// it. The btrfs storage pool will be mounted at
 	// ${LXD_DIR}/storage-pools/<pool_name>.
 	if !shared.IsMountPoint(poolMntPoint) {
-		err := s.StoragePoolMount()
+		err, _ := s.StoragePoolMount()
 		if err != nil {
 			return err
 		}
@@ -772,6 +772,14 @@ func (s *storageBtrfs) ImageDelete(fingerprint string) error {
 	return nil
 }
 
+func (s *storageBtrfs) ImageMount(fingerprint string) (error, bool) {
+	return nil, true
+}
+
+func (s *storageBtrfs) ImageUmount(fingerprint string) (error, bool) {
+	return nil, true
+}
+
 func (s *storageBtrfs) btrfsPoolVolumeCreate(subvol string) error {
 	parentDestPath := filepath.Dir(subvol)
 	if !shared.PathExists(parentDestPath) {
diff --git a/lxd/storage_dir.go b/lxd/storage_dir.go
index 32d4726..e17a251 100644
--- a/lxd/storage_dir.go
+++ b/lxd/storage_dir.go
@@ -71,20 +71,20 @@ func (s *storageDir) StoragePoolVolumeDelete() error {
 	return nil
 }
 
-func (s *storageDir) StoragePoolMount() error {
-	return nil
+func (s *storageDir) StoragePoolMount() (error, bool) {
+	return nil, true
 }
 
-func (s *storageDir) StoragePoolUmount() error {
-	return nil
+func (s *storageDir) StoragePoolUmount() (error, bool) {
+	return nil, true
 }
 
-func (s *storageDir) StoragePoolVolumeMount() error {
-	return nil
+func (s *storageDir) StoragePoolVolumeMount() (error, bool) {
+	return nil, true
 }
 
-func (s *storageDir) StoragePoolVolumeUmount() error {
-	return nil
+func (s *storageDir) StoragePoolVolumeUmount() (error, bool) {
+	return nil, true
 }
 
 func (s *storageDir) GetStoragePoolWritable() api.StoragePoolPut {
@@ -212,12 +212,12 @@ func (s *storageDir) ContainerCopy(
 	return container.TemplateApply("copy")
 }
 
-func (s *storageDir) ContainerStart(name string, path string) error {
-	return nil
+func (s *storageDir) ContainerMount(name string, path string) (error, bool) {
+	return nil, true
 }
 
-func (s *storageDir) ContainerStop(name string, path string) error {
-	return nil
+func (s *storageDir) ContainerUmount(name string, path string) (error, bool) {
+	return nil, true
 }
 
 func (s *storageDir) ContainerRename(container container, newName string) error {
@@ -383,6 +383,14 @@ func (s *storageDir) ImageDelete(fingerprint string) error {
 	return nil
 }
 
+func (s *storageDir) ImageMount(fingerprint string) (error, bool) {
+	return nil, true
+}
+
+func (s *storageDir) ImageUmount(fingerprint string) (error, bool) {
+	return nil, true
+}
+
 func (s *storageDir) MigrationType() MigrationFSType {
 	return MigrationFSType_RSYNC
 }
diff --git a/lxd/storage_lvm.go b/lxd/storage_lvm.go
index 056377a..59e3b58 100644
--- a/lxd/storage_lvm.go
+++ b/lxd/storage_lvm.go
@@ -268,20 +268,20 @@ func (s *storageLvm) StoragePoolVolumeDelete() error {
 	return nil
 }
 
-func (s *storageLvm) StoragePoolMount() error {
-	return nil
+func (s *storageLvm) StoragePoolMount() (error, bool) {
+	return nil, true
 }
 
-func (s *storageLvm) StoragePoolUmount() error {
-	return nil
+func (s *storageLvm) StoragePoolUmount() (error, bool) {
+	return nil, true
 }
 
-func (s *storageLvm) StoragePoolVolumeMount() error {
-	return nil
+func (s *storageLvm) StoragePoolVolumeMount() (error, bool) {
+	return nil, true
 }
 
-func (s *storageLvm) StoragePoolVolumeUmount() error {
-	return nil
+func (s *storageLvm) StoragePoolVolumeUmount() (error, bool) {
+	return nil, true
 }
 
 func (s *storageLvm) GetStoragePoolWritable() api.StoragePoolPut {
@@ -475,7 +475,7 @@ func (s *storageLvm) ContainerCopy(container container, sourceContainer containe
 			return err
 		}
 
-		if err := s.ContainerStart(container.Name(), container.Path()); err != nil {
+		if err, _ := s.ContainerMount(container.Name(), container.Path()); err != nil {
 			s.log.Error("Error starting/mounting container", log.Ctx{"err": err, "container": container.Name()})
 			s.ContainerDelete(container)
 			return err
@@ -490,14 +490,14 @@ func (s *storageLvm) ContainerCopy(container container, sourceContainer containe
 			return fmt.Errorf("rsync failed: %s", string(output))
 		}
 
-		if err := s.ContainerStop(container.Name(), container.Path()); err != nil {
+		if err, _ := s.ContainerUmount(container.Name(), container.Path()); err != nil {
 			return err
 		}
 	}
 	return container.TemplateApply("copy")
 }
 
-func (s *storageLvm) ContainerStart(name string, path string) error {
+func (s *storageLvm) ContainerMount(name string, path string) (error, bool) {
 	lvName := containerNameToLVName(name)
 	lvpath := fmt.Sprintf("/dev/%s/%s", s.pool.PoolName, lvName)
 	fstype := daemonConfig["storage.lvm_fstype"].Get()
@@ -508,22 +508,22 @@ func (s *storageLvm) ContainerStart(name string, path string) error {
 		return fmt.Errorf(
 			"Error mounting snapshot LV path='%s': %v",
 			path,
-			err)
+			err), false
 	}
 
-	return nil
+	return nil, true
 }
 
-func (s *storageLvm) ContainerStop(name string, path string) error {
+func (s *storageLvm) ContainerUmount(name string, path string) (error, bool) {
 	err := tryUnmount(path, 0)
 	if err != nil {
 		return fmt.Errorf(
 			"failed to unmount container path '%s'.\nError: %v",
 			path,
-			err)
+			err), false
 	}
 
-	return nil
+	return nil, true
 }
 
 func (s *storageLvm) ContainerRename(
@@ -761,7 +761,7 @@ func (s *storageLvm) ContainerSnapshotStart(container container) error {
 }
 
 func (s *storageLvm) ContainerSnapshotStop(container container) error {
-	err := s.ContainerStop(container.Name(), container.Path())
+	err, _ := s.ContainerUmount(container.Name(), container.Path())
 	if err != nil {
 		return err
 	}
@@ -851,6 +851,14 @@ func (s *storageLvm) ImageDelete(fingerprint string) error {
 	return nil
 }
 
+func (s *storageLvm) ImageMount(fingerprint string) (error, bool) {
+	return nil, true
+}
+
+func (s *storageLvm) ImageUmount(fingerprint string) (error, bool) {
+	return nil, true
+}
+
 func (s *storageLvm) createDefaultThinPool() (string, error) {
 	thinPoolName := daemonConfig["storage.lvm_thinpool_name"].Get()
 	isRecent, err := s.lvmVersionIsAtLeast("2.02.99")
diff --git a/lxd/storage_zfs.go b/lxd/storage_zfs.go
index 6e45ff5..0cf5a90 100644
--- a/lxd/storage_zfs.go
+++ b/lxd/storage_zfs.go
@@ -165,12 +165,12 @@ func (s *storageZfs) StoragePoolDelete() error {
 	return nil
 }
 
-func (s *storageZfs) StoragePoolMount() error {
-	return nil
+func (s *storageZfs) StoragePoolMount() (error, bool) {
+	return nil, true
 }
 
-func (s *storageZfs) StoragePoolUmount() error {
-	return nil
+func (s *storageZfs) StoragePoolUmount() (error, bool) {
+	return nil, true
 }
 
 func (s *storageZfs) StoragePoolVolumeDelete() error {
@@ -192,10 +192,10 @@ func (s *storageZfs) StoragePoolVolumeDelete() error {
 	return nil
 }
 
-func (s *storageZfs) StoragePoolVolumeMount() error {
+func (s *storageZfs) StoragePoolVolumeMount() (error, bool) {
 	volApiEndpoint, err := storagePoolVolumeTypeNameToApiEndpoint(s.volume.VolumeType)
 	if err != nil {
-		return err
+		return err, false
 	}
 
 	fs := fmt.Sprintf("%s/%s", volApiEndpoint, s.volume.VolumeName)
@@ -208,17 +208,17 @@ func (s *storageZfs) StoragePoolVolumeMount() error {
 	if !shared.IsMountPoint(fsMountpoint) {
 		err := s.zfsPoolVolumeMount(fs)
 		if err != nil {
-			return err
+			return err, false
 		}
 	}
 
-	return nil
+	return nil, true
 }
 
-func (s *storageZfs) StoragePoolVolumeUmount() error {
+func (s *storageZfs) StoragePoolVolumeUmount() (error, bool) {
 	volApiEndpoint, err := storagePoolVolumeTypeNameToApiEndpoint(s.volume.VolumeType)
 	if err != nil {
-		return err
+		return err, false
 	}
 
 	fs := fmt.Sprintf("%s/%s", volApiEndpoint, s.volume.VolumeName)
@@ -231,11 +231,11 @@ func (s *storageZfs) StoragePoolVolumeUmount() error {
 	if shared.IsMountPoint(fsMountpoint) {
 		err := s.zfsPoolVolumeUmount(fs)
 		if err != nil {
-			return err
+			return err, false
 		}
 	}
 
-	return nil
+	return nil, true
 }
 
 func (s *storageZfs) GetStoragePoolWritable() api.StoragePoolPut {
@@ -283,19 +283,22 @@ func (s *storageZfs) StoragePoolVolumeUpdate(changedConfig []string) error {
 }
 
 // Things we don't need to care about
-func (s *storageZfs) ContainerStart(name string, path string) error {
+func (s *storageZfs) ContainerMount(name string, path string) (error, bool) {
 	fs := fmt.Sprintf("containers/%s", name)
 
 	// Just in case the container filesystem got unmounted
 	if !shared.IsMountPoint(shared.VarPath(fs)) {
-		s.zfsPoolVolumeMount(fs)
+		err := s.zfsPoolVolumeMount(fs)
+		if err != nil {
+			return err, false
+		}
 	}
 
-	return nil
+	return nil, true
 }
 
-func (s *storageZfs) ContainerStop(name string, path string) error {
-	return nil
+func (s *storageZfs) ContainerUmount(name string, path string) (error, bool) {
+	return nil, true
 }
 
 // Things we do have to care about
@@ -957,6 +960,14 @@ func (s *storageZfs) ImageDelete(fingerprint string) error {
 	return nil
 }
 
+func (s *storageZfs) ImageMount(fingerprint string) (error, bool) {
+	return nil, true
+}
+
+func (s *storageZfs) ImageUmount(fingerprint string) (error, bool) {
+	return nil, true
+}
+
 // Helper functions
 func (s *storageZfs) zfsPoolCheck(pool string) error {
 	output, err := exec.Command(

From e9ca5aeb09286d868a6fb35e701ecd3f753f1c48 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Tue, 31 Jan 2017 23:17:42 +0100
Subject: [PATCH 42/63] util_linux: helpers to lookup UUIDs and disk paths

- LookupUUIDByBlockDevPath()
- LookupBlockDevByUUID()

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 shared/util_linux.go | 73 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 73 insertions(+)

diff --git a/shared/util_linux.go b/shared/util_linux.go
index 87b44d6..4412a45 100644
--- a/shared/util_linux.go
+++ b/shared/util_linux.go
@@ -9,6 +9,7 @@ import (
 	"io"
 	"os"
 	"os/exec"
+	"path/filepath"
 	"strings"
 	"sync"
 	"sync/atomic"
@@ -700,3 +701,75 @@ func ExecReaderToChannel(r io.Reader, bufferSize int, exited <-chan bool, fd int
 
 	return ch
 }
+
+var ObjectFound = fmt.Errorf("Found requested object.")
+
+func LookupUUIDByBlockDevPath(diskDevice string) (string, error) {
+	uuid := ""
+	readUUID := func(path string, info os.FileInfo, err error) error {
+		if (info.Mode() & os.ModeSymlink) == os.ModeSymlink {
+			link, err := os.Readlink(path)
+			if err != nil {
+				return err
+			}
+
+			// COMMENT(brauner): filepath.Join() will call Clean()
+			// on the result and thus resolve those ugly "../../"
+			// parts that make it hard to compare the strings.
+			absPath := filepath.Join("/dev/disk/by-uuid", link)
+			if absPath == diskDevice {
+				uuid = path
+				// COMMENT(brauner): This allows us to avoid
+				// needlessly traversing the whole directory.
+				return ObjectFound
+			}
+		}
+		return nil
+	}
+
+	err := filepath.Walk("/dev/disk/by-uuid", readUUID)
+	if err != nil && err != ObjectFound {
+		return "", fmt.Errorf("Failed to detect UUID: %s.", err)
+	}
+
+	if uuid == "" {
+		return "", fmt.Errorf("Failed to detect UUID.")
+	}
+
+	lastSlash := strings.LastIndex(uuid, "/")
+	return uuid[lastSlash+1:], nil
+}
+
+func LookupBlockDevByUUID(uuid string) (string, error) {
+	detectedPath := ""
+	readPath := func(path string, info os.FileInfo, err error) error {
+		if (info.Mode() & os.ModeSymlink) == os.ModeSymlink {
+			link, err := os.Readlink(path)
+			if err != nil {
+				return err
+			}
+
+			if info.Name() == uuid {
+				// COMMENT(brauner): filepath.Join() will call Clean()
+				// on the result and thus resolve those ugly "../../"
+				// parts that make it hard to compare the strings.
+				detectedPath = filepath.Join("/dev/disk/by-uuid", link)
+				// COMMENT(brauner): This allows us to avoid
+				// needlessly traversing the whole directory.
+				return ObjectFound
+			}
+		}
+		return nil
+	}
+
+	err := filepath.Walk("/dev/disk/by-uuid", readPath)
+	if err != nil && err != ObjectFound {
+		return "", fmt.Errorf("Failed to detect disk device: %s.", err)
+	}
+
+	if detectedPath == "" {
+		return "", fmt.Errorf("Failed to detect disk device.")
+	}
+
+	return detectedPath, nil
+}
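
The ObjectFound sentinel is the usual trick for aborting filepath.Walk()
early: returning any non-nil error stops the walk, so a dedicated "found it"
error lets the caller tell success apart from a real failure. The same
pattern in a self-contained form:

package sketch

import (
	"fmt"
	"os"
	"path/filepath"
)

var errFound = fmt.Errorf("found requested object")

// findSymlinkTo walks dir and returns the first symlink whose resolved
// target equals wanted, aborting the walk as soon as a match is found.
func findSymlinkTo(dir, wanted string) (string, error) {
	match := ""
	err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if info.Mode()&os.ModeSymlink == 0 {
			return nil
		}
		link, err := os.Readlink(path)
		if err != nil {
			return err
		}
		// Join() calls Clean() and so resolves relative "../.." targets.
		if filepath.Join(dir, link) == wanted {
			match = path
			return errFound // any non-nil error stops the walk
		}
		return nil
	})
	if err != nil && err != errFound {
		return "", err
	}
	if match == "" {
		return "", fmt.Errorf("no symlink to %s below %s", wanted, dir)
	}
	return match, nil
}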

From 4a20d55e8a57fe287427f4dd2fe1600d7605b2e0 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Sun, 22 Jan 2017 19:09:49 +0100
Subject: [PATCH 43/63] storage: reimplement internal generic storage api

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/storage.go | 160 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 159 insertions(+), 1 deletion(-)

diff --git a/lxd/storage.go b/lxd/storage.go
index 17898f9..e8a814e 100644
--- a/lxd/storage.go
+++ b/lxd/storage.go
@@ -451,6 +451,164 @@ type storageShared struct {
 	volume *api.StorageVolume
 }
 
+// COMMENT(brauner):
+// ${LXD_DIR}/storage-pools/<pool_name>
+func getStoragePoolMountPoint(poolName string) string {
+	return shared.VarPath("storage-pools", poolName)
+}
+
+// COMMENT(brauner):
+// ${LXD_DIR}/storage-pools/<pool_name>/containers/<container_name>
+func getContainerMountPoint(poolName string, containerName string) string {
+	return shared.VarPath("storage-pools", poolName, "containers", containerName)
+}
+
+// COMMENT(brauner):
+// ${LXD_DIR}/storage-pools/<pool_name>/snapshots/<snapshot_name>
+func getSnapshotMountPoint(poolName string, snapshotName string) string {
+	return shared.VarPath("storage-pools", poolName, "snapshots", snapshotName)
+}
+
+// COMMENT(brauner):
+// ${LXD_DIR}/storage-pools/<pool_name>/images/<fingerprint>
+func getImageMountPoint(poolName string, fingerprint string) string {
+	return shared.VarPath("storage-pools", poolName, "images", fingerprint)
+}
+
+// COMMENT(brauner):
+// ${LXD_DIR}/storage-pools/<pool_name>/custom/<storage_volume>
+func getStoragePoolVolumeMountPoint(poolName string, volumeName string) string {
+	return shared.VarPath("storage-pools", poolName, "custom", volumeName)
+}
+
+func createContainerMountpoint(mountPoint string, mountPointSymlink string, privileged bool) error {
+	var mode os.FileMode
+	if privileged {
+		mode = 0700
+	} else {
+		mode = 0755
+	}
+
+	mntPointSymlinkExist := shared.PathExists(mountPointSymlink)
+	mntPointSymlinkTargetExist := shared.PathExists(mountPoint)
+
+	if !mntPointSymlinkTargetExist {
+		err := os.MkdirAll(mountPoint, mode)
+		if err != nil {
+			return err
+		}
+	}
+
+	if !mntPointSymlinkExist {
+		err := os.Symlink(mountPoint, mountPointSymlink)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func deleteContainerMountpoint(mountPoint string, mountPointSymlink string, storageTypeName string) error {
+	mntPointSuffix := storageTypeName
+	oldStyleMntPointSymlink := fmt.Sprintf("%s.%s", mountPointSymlink, mntPointSuffix)
+
+	if shared.PathExists(mountPointSymlink) {
+		err := os.Remove(mountPointSymlink)
+		if err != nil {
+			return err
+		}
+	}
+
+	if shared.PathExists(oldStyleMntPointSymlink) {
+		err := os.Remove(oldStyleMntPointSymlink)
+		if err != nil {
+			return err
+		}
+	}
+
+	if shared.PathExists(mountPoint) {
+		err := os.Remove(mountPoint)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func renameContainerMountpoint(oldMountPoint string, oldMountPointSymlink string, newMountPoint string, newMountPointSymlink string) error {
+	if shared.PathExists(oldMountPoint) {
+		err := os.Rename(oldMountPoint, newMountPoint)
+		if err != nil {
+			return err
+		}
+	}
+
+	// COMMENT(brauner): Rename the symlink target.
+	if shared.PathExists(oldMountPointSymlink) {
+		err := os.Remove(oldMountPointSymlink)
+		if err != nil {
+			return err
+		}
+	}
+
+	// COMMENT(brauner): Create the new symlink.
+	err := os.Symlink(newMountPoint, newMountPointSymlink)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func createSnapshotMountpoint(snapshotMountpoint string, snapshotsSymlinkTarget string, snapshotsSymlink string) error {
+	snapshotMntPointExists := shared.PathExists(snapshotMountpoint)
+	mntPointSymlinkExist := shared.PathExists(snapshotsSymlink)
+
+	if !snapshotMntPointExists {
+		err := os.MkdirAll(snapshotMountpoint, 0711)
+		if err != nil {
+			return err
+		}
+	}
+
+	if !mntPointSymlinkExist {
+		err := os.Symlink(snapshotsSymlinkTarget, snapshotsSymlink)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func deleteSnapshotMountpoint(snapshotMountpoint string, snapshotsSymlinkTarget string, snapshotsSymlink string) error {
+	if shared.PathExists(snapshotMountpoint) {
+		err := os.Remove(snapshotMountpoint)
+		if err != nil {
+			return err
+		}
+	}
+
+	couldRemove := false
+	if shared.PathExists(snapshotsSymlinkTarget) {
+		err := os.Remove(snapshotsSymlinkTarget)
+		if err == nil {
+			couldRemove = true
+		}
+	}
+
+	if couldRemove && shared.PathExists(snapshotsSymlink) {
+		err := os.Remove(snapshotsSymlink)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
 func (ss *storageShared) shiftRootfs(c container) error {
 	dpath := c.Path()
 	rpath := c.RootfsPath()
@@ -499,7 +657,7 @@ func (ss *storageShared) setUnprivUserAcl(c container, destPath string) error {
 	if err != nil {
 		_, err := exec.Command("chmod", "+x", destPath).CombinedOutput()
 		if err != nil {
-			return fmt.Errorf("Failed to chmod the container path.")
+			return fmt.Errorf("Failed to chmod the container path: %s.", err)
 		}
 	}
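
The net effect of these helpers is that the real directories now live under
the pool's mountpoint while the legacy ${LXD_DIR}/containers/<name> paths
become symlinks to them. A hypothetical demo of the resulting layout for a
container "c1" on a pool "default" (/tmp/lxd-demo stands in for ${LXD_DIR}):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	lxdDir := "/tmp/lxd-demo" // stand-in for ${LXD_DIR}
	mountPoint := filepath.Join(lxdDir, "storage-pools", "default", "containers", "c1")
	symlink := filepath.Join(lxdDir, "containers", "c1")

	// Unprivileged containers get a 0755 mountpoint, privileged ones 0700.
	if err := os.MkdirAll(mountPoint, 0755); err != nil {
		fmt.Println(err)
		return
	}
	if err := os.MkdirAll(filepath.Dir(symlink), 0711); err != nil {
		fmt.Println(err)
		return
	}
	// The legacy path is just a symlink to the per-pool directory.
	if err := os.Symlink(mountPoint, symlink); err != nil {
		fmt.Println(err)
		return
	}

	fmt.Println(symlink, "->", mountPoint)
}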
 

From 0226cb20145ee4f046fa54fb0fd2e5ec1d7f4cf4 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Sun, 22 Jan 2017 19:11:16 +0100
Subject: [PATCH 44/63] lxd/storage: only retry umount on EBUSY

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/storage.go | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/lxd/storage.go b/lxd/storage.go
index e8a814e..a71a471 100644
--- a/lxd/storage.go
+++ b/lxd/storage.go
@@ -170,6 +170,8 @@ type storageCoreInfo interface {
 // - type storagePoolVolume interface
 // - type storageContainer interface
 // - type storageImage interface
+// Also, minimize the number of functions needed. Both should be straightforward
+// tasks.
 type storage interface {
 	storageCoreInfo
 
@@ -1177,7 +1179,7 @@ func tryUnmount(path string, flags int) error {
 		time.Sleep(500 * time.Millisecond)
 	}
 
-	if err != nil {
+	if err == syscall.EBUSY {
 		return err
 	}
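
For context, tryUnmount() retries the umount in a loop. Applying "only retry
on EBUSY" to the loop itself, rather than only to the final error check as
the minimal diff above does, would look roughly like this sketch (loop
bounds illustrative, not the actual LXD code):

package sketch

import (
	"syscall"
	"time"
)

// tryUnmount retries the umount only while the filesystem is busy; any
// other error is returned immediately.
func tryUnmount(path string, flags int) error {
	var err error
	for i := 0; i < 20; i++ {
		err = syscall.Unmount(path, flags)
		if err == nil {
			return nil
		}
		if err != syscall.EBUSY {
			return err
		}
		time.Sleep(500 * time.Millisecond)
	}
	return err
}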
 

From 05d424da52fb380d98d8d517c306e9c5acccf0b8 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Sun, 22 Jan 2017 19:10:34 +0100
Subject: [PATCH 45/63] storage: reimplement btrfs backend

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/storage_btrfs.go | 1019 ++++++++++++++++++++++++++++++++++----------------
 1 file changed, 689 insertions(+), 330 deletions(-)

diff --git a/lxd/storage_btrfs.go b/lxd/storage_btrfs.go
index b6db673..9e69fc2 100644
--- a/lxd/storage_btrfs.go
+++ b/lxd/storage_btrfs.go
@@ -12,6 +12,7 @@ import (
 	"strconv"
 	"strings"
 	"syscall"
+	"time"
 
 	"github.com/gorilla/websocket"
 	"github.com/pborman/uuid"
@@ -26,6 +27,54 @@ type storageBtrfs struct {
 	storageShared
 }
 
+// COMMENT(brauner):
+// ${LXD_DIR}/storage-pools/<pool_name>/containers
+func (s *storageBtrfs) getContainerSubvolumePath(poolName string) string {
+	return shared.VarPath("storage-pools", poolName, "containers")
+}
+
+// COMMENT(brauner):
+// ${LXD_DIR}/storage-pools/<pool_name>/snapshots
+func (s *storageBtrfs) getSnapshotSubvolumePath(poolName string, containerName string) string {
+	return shared.VarPath("storage-pools", poolName, "snapshots", containerName)
+}
+
+// COMMENT(brauner):
+// ${LXD_DIR}/storage-pools/<pool_name>/images
+func (s *storageBtrfs) getImageSubvolumePath(poolName string) string {
+	return shared.VarPath("storage-pools", poolName, "images")
+}
+
+// COMMENT(brauner):
+// ${LXD_DIR}/storage-pools/<pool_name>/custom
+func (s *storageBtrfs) getCustomSubvolumePath(poolName string) string {
+	return shared.VarPath("storage-pools", poolName, "custom")
+}
+
+// COMMENT(brauner):
+// subvol=containers/<container_name>
+func (s *storageBtrfs) getContainerMntOptions(name string) string {
+	return fmt.Sprintf("subvol=containers/%s", name)
+}
+
+// COMMENT(brauner):
+// subvol=snapshots/<snapshot_name>
+func (s *storageBtrfs) getSnapshotMntOptions(name string) string {
+	return fmt.Sprintf("subvol=snapshots/%s", name)
+}
+
+// COMMENT(brauner):
+// subvol=images/<fingerprint>
+func (s *storageBtrfs) getImageMntOptions(imageFingerprint string) string {
+	return fmt.Sprintf("subvol=images/%s", imageFingerprint)
+}
+
+// COMMENT(brauner):
+// subvol=custom/<custom_name>
+func (s *storageBtrfs) getCustomMntOptions() string {
+	return fmt.Sprintf("subvol=custom/%s", s.volume.VolumeName)
+}
+
 func (s *storageBtrfs) StorageCoreInit() (*storageCore, error) {
 	sCore := storageCore{}
 	sCore.sType = storageTypeBtrfs
@@ -70,6 +119,8 @@ func (s *storageBtrfs) StoragePoolInit(config map[string]interface{}) (storage,
 }
 
 func (s *storageBtrfs) StoragePoolCheck() error {
+	// TODO(brauner): Think of something smart or useful (And then think
+	// again if it is worth implementing it. :)).
 	return nil
 }
 
@@ -79,45 +130,63 @@ func (s *storageBtrfs) StoragePoolCreate() error {
 		return fmt.Errorf("No \"source\" property found for the storage pool.")
 	}
 
-	// COMMENT(brauner): Create the mountpoint for the storage pool.
-	target := filepath.Join(shared.VarPath("storage-pools"), s.pool.PoolName)
-	err := os.MkdirAll(target, 0700)
-	if err != nil {
-		return err
+	if !filepath.IsAbs(source) {
+		return fmt.Errorf("Only absolute paths are allowed for now.")
 	}
 
-	if !shared.IsBlockdevPath(source) {
-		source = source + ".img"
-		s.pool.PoolConfig["source"] = source
+	// COMMENT(brauner): Create the mountpoint for the storage pool.
+	isBlockDev := shared.IsBlockdevPath(source)
+	if !isBlockDev {
+		if s.d.BackingFs == "btrfs" {
+			// COMMENT(brauner): Deal with the case where the
+			// backing fs is a btrfs pool itself.
+			// TODO(brauner): Figure out a way to let users create a
+			// loop file even if the backing fs is btrfs.
+			err := s.btrfsPoolVolumeCreate(source)
+			if err != nil {
+				return err
+			}
+			return nil
+		} else {
+			source = source + ".img"
+			s.pool.PoolConfig["source"] = source
 
-		// COMMENT(brauner): This is likely a loop file.
-		f, err := os.Create(source)
-		if err != nil {
-			return fmt.Errorf("Failed to open %s: %s", source, err)
-		}
+			// COMMENT(brauner): This is likely a loop file.
+			f, err := os.Create(source)
+			if err != nil {
+				return fmt.Errorf("Failed to open %s: %s", source, err)
+			}
 
-		err = f.Chmod(0600)
-		if err != nil {
-			return fmt.Errorf("Failed to chmod %s: %s", source, err)
-		}
+			err = f.Chmod(0600)
+			if err != nil {
+				return fmt.Errorf("Failed to chmod %s: %s", source, err)
+			}
 
-		size, err := strconv.ParseInt(s.pool.PoolConfig["size"], 10, 64)
-		if err != nil {
-			return err
-		}
+			size, err := strconv.ParseInt(s.pool.PoolConfig["size"], 10, 64)
+			if err != nil {
+				return err
+			}
 
-		err = f.Truncate(size)
-		if err != nil {
-			return fmt.Errorf("Failed to create sparse file %s: %s", source, err)
-		}
+			err = f.Truncate(size)
+			if err != nil {
+				return fmt.Errorf("Failed to create sparse file %s: %s", source, err)
+			}
 
-		err = f.Close()
-		if err != nil {
-			return fmt.Errorf("Failed to close %s: %s", source, err)
+			err = f.Close()
+			if err != nil {
+				return fmt.Errorf("Failed to close %s: %s", source, err)
+			}
 		}
 	}
 
+	poolMntPoint := getStoragePoolMountPoint(s.pool.PoolName)
+	err := os.MkdirAll(poolMntPoint, 0711)
+	if err != nil {
+		return err
+	}
+
 	// COMMENT(brauner): Create a btrfs filesystem.
+	// TODO(brauner): Figure out why this fails in this weird way.
 	output, err := exec.Command(
 		"mkfs.btrfs",
 		"-L", s.pool.PoolName, source).CombinedOutput()
@@ -125,11 +194,25 @@ func (s *storageBtrfs) StoragePoolCreate() error {
 		return fmt.Errorf("Failed to create the BTRFS pool: %s", output)
 	}
 
-	return nil
-}
+	if isBlockDev && filepath.IsAbs(source) {
+		var err error
+		var devUUID string
+		// COMMENT(brauner): Ah, how I hate delay-dependent function
+		// calls... So, we need to try a couple of times after creating
+		// the pool since there might be a f*cking delay between
+		// creating the fs, and the symlink under /dev/disk/by-uuid
+		// being available.
+		for i := 0; i < 20; i++ {
+			devUUID, err = shared.LookupUUIDByBlockDevPath(source)
+			if err == nil {
+				s.pool.PoolConfig["source"] = devUUID
+				break
+			}
+			time.Sleep(500 * time.Millisecond)
+		}
+	}
 
-func (s *storageBtrfs) StoragePoolVolumeCreate() error {
-	return nil
+	return err
 }
 
 func (s *storageBtrfs) StoragePoolDelete() error {
@@ -138,81 +221,109 @@ func (s *storageBtrfs) StoragePoolDelete() error {
 		return fmt.Errorf("No \"source\" property found for the storage pool.")
 	}
 
-	target := filepath.Join(shared.VarPath("storage-pools"), s.pool.PoolName)
+	err, _ := s.StoragePoolUmount()
+	if err != nil {
+		return err
+	}
 
-	// COMMENT(brauner): If the storage pool is currently mounted, unmount
-	// it.
-	if shared.IsMountPoint(target) {
-		err, _ := s.StoragePoolUmount()
+	devUUID := ""
+	// COMMENT(brauner): This is a UUID. Check whether we can find the block
+	// device.
+	if !filepath.IsAbs(source) {
+		msg := ""
+		devUUID, err = shared.LookupBlockDevByUUID(source)
 		if err != nil {
-			return err
+			msg = fmt.Sprintf("Failed to lookup disk device with UUID: %s.", source)
+		} else {
+			msg = fmt.Sprintf("Removing disk device %s with UUID: %s.", devUUID, source)
 		}
-	}
-
-	if shared.IsBlockdevPath(source) {
-		// TODO(brauner): Remove block devices.
+		s.log.Debug(msg)
 	} else {
-		// COMMENT(brauner): This is a loop file --> simply remove it.
-		err := os.Remove(target)
+		var err error
+		if s.d.BackingFs == "btrfs" {
+			err = s.btrfsPoolVolumeDelete(source)
+		} else {
+			// COMMENT(brauner): This is a loop file --> simply remove it.
+			err = os.Remove(source)
+		}
 		if err != nil {
 			return err
 		}
 	}
 
 	// COMMENT(brauner): Remove the mountpoint for the storage pool.
-	os.RemoveAll(target)
+	os.RemoveAll(getStoragePoolMountPoint(s.pool.PoolName))
 
 	return nil
 }
 
-func (s *storageBtrfs) StoragePoolVolumeDelete() error {
-	return nil
-}
-
 func (s *storageBtrfs) StoragePoolMount() (error, bool) {
 	source := s.pool.PoolConfig["source"]
 	if source == "" {
 		return fmt.Errorf("No \"source\" property found for the storage pool."), false
 	}
 
-	target := filepath.Join(shared.VarPath("storage-pools"), s.pool.PoolName)
+	poolMntPoint := getStoragePoolMountPoint(s.pool.PoolName)
 
-	if !shared.PathExists(target) {
-		err := os.MkdirAll(target, 0700)
+	// COMMENT(brauner): Check whether the mountpoint poolMntPoint exists.
+	if !shared.PathExists(poolMntPoint) {
+		err := os.MkdirAll(poolMntPoint, 0711)
 		if err != nil {
 			return err, false
 		}
 	}
 
-	if !shared.IsMountPoint(target) {
-		mountOptions := ""
-		// COMMENT(brauner): This is a loop mount.
-		if !shared.IsBlockdevPath(source) {
+	if shared.IsMountPoint(poolMntPoint) {
+		return nil, false
+	}
+
+	poolMntOptions := ""
+	mountSource := source
+	file := &os.File{}
+	if filepath.IsAbs(source) {
+		if !shared.IsBlockdevPath(source) && s.d.BackingFs != "btrfs" {
 			fd, loopDev := prepareLoopDev(source)
 			if fd < 0 {
 				return fmt.Errorf("Could not prepare loop device."), false
 			}
-			file := os.NewFile(uintptr(fd), "")
+			file = os.NewFile(uintptr(fd), "")
+			mountSource = loopDev
 			defer file.Close()
-
-			err := tryMount(loopDev, target, "btrfs", 0, mountOptions)
-			if err != nil {
-				return err, false
-			}
+		} else {
+			return nil, false
+		}
+	} else {
+		// COMMENT(brauner): Try to lookup the disk device by UUID but
+		// don't fail. If we don't find one this might just mean we have
+		// been given the UUID of a subvolume.
+		diskPath, err := shared.LookupBlockDevByUUID(source)
+		if err == nil {
+			mountSource = diskPath
+		} else {
+			// COMMENT(brauner): We have very likely been given a
+			// subvolume UUID. In this case we should simply assume
+			// that the user has mounted the parent of the subvolume
+			// or the subvolume itself. Otherwise this becomes a
+			// really messy detection task.
+			return nil, false
 		}
 
-		// TODO(brauner): Implement mounting when btrfs source is block
-		// device.
+	}
+
+	// COMMENT(brauner): This is a block device.
+	err := syscall.Mount(mountSource, poolMntPoint, "btrfs", 0, poolMntOptions)
+	if err != nil {
+		return err, false
 	}
 
 	return nil, true
 }
 
 func (s *storageBtrfs) StoragePoolUmount() (error, bool) {
-	target := filepath.Join(shared.VarPath("storage-pools"), s.pool.PoolName)
+	poolMntPoint := getStoragePoolMountPoint(s.pool.PoolName)
 
-	if shared.IsMountPoint(target) {
-		err := tryUnmount(target, 0)
+	if shared.IsMountPoint(poolMntPoint) {
+		err := syscall.Unmount(poolMntPoint, 0)
 		if err != nil {
 			return err, false
 		}
@@ -221,30 +332,18 @@ func (s *storageBtrfs) StoragePoolUmount() (error, bool) {
 	return nil, true
 }
 
-func (s *storageBtrfs) StoragePoolVolumeMount() (error, bool) {
-	return nil, true
-}
-
-func (s *storageBtrfs) StoragePoolVolumeUmount() (error, bool) {
-	return nil, true
+func (s *storageBtrfs) StoragePoolUpdate(changedConfig []string) error {
+	return nil
 }
 
 func (s *storageBtrfs) GetStoragePoolWritable() api.StoragePoolPut {
 	return s.pool.Writable()
 }
 
-func (s *storageBtrfs) GetStoragePoolVolumeWritable() api.StorageVolumePut {
-	return s.volume.Writable()
-}
-
 func (s *storageBtrfs) SetStoragePoolWritable(writable *api.StoragePoolPut) {
 	s.pool.StoragePoolPut = *writable
 }
 
-func (s *storageBtrfs) SetStoragePoolVolumeWritable(writable *api.StorageVolumePut) {
-	s.volume.StorageVolumePut = *writable
-}
-
 func (s *storageBtrfs) ContainerPoolGet() string {
 	return s.pool.PoolName
 }
@@ -253,53 +352,216 @@ func (s *storageBtrfs) ContainerPoolIDGet() int64 {
 	return s.poolID
 }
 
-func (s *storageBtrfs) StoragePoolUpdate(changedConfig []string) error {
+// Functions dealing with storage volumes.
+func (s *storageBtrfs) StoragePoolVolumeCreate() error {
+	err, _ := s.StoragePoolMount()
+	if err != nil {
+		return err
+	}
+
+	// COMMENT(brauner): Create subvolume path on the storage pool.
+	customSubvolumePath := s.getCustomSubvolumePath(s.pool.PoolName)
+	if !shared.PathExists(customSubvolumePath) {
+		err := os.MkdirAll(customSubvolumePath, 0700)
+		if err != nil {
+			return err
+		}
+	}
+
+	// COMMENT(brauner): Create subvolume.
+	customSubvolumeName := getStoragePoolVolumeMountPoint(s.pool.PoolName, s.volume.VolumeName)
+	err = s.btrfsPoolVolumeCreate(customSubvolumeName)
+	if err != nil {
+		return err
+	}
+
 	return nil
 }
 
-func (s *storageBtrfs) StoragePoolVolumeUpdate(changedConfig []string) error {
+func (s *storageBtrfs) StoragePoolVolumeDelete() error {
+	err, _ := s.StoragePoolMount()
+	if err != nil {
+		return err
+	}
+
+	// COMMENT(brauner): Delete subvolume.
+	customSubvolumeName := getStoragePoolVolumeMountPoint(s.pool.PoolName, s.volume.VolumeName)
+	err = s.btrfsPoolVolumeDelete(customSubvolumeName)
+	if err != nil {
+		return err
+	}
+
+	// COMMENT(brauner): Delete the mountpoint.
+	if shared.PathExists(customSubvolumeName) {
+		err = os.Remove(customSubvolumeName)
+		if err != nil {
+			return err
+		}
+	}
+
 	return nil
 }
 
-func (s *storageBtrfs) ContainerCreate(container container) error {
-	cPath := container.Path()
+func (s *storageBtrfs) StoragePoolVolumeMount() (error, bool) {
+	source := s.pool.PoolConfig["source"]
+	if source == "" {
+		return fmt.Errorf("No \"source\" property found for the storage pool."), false
+	}
 
-	// MkdirAll the pardir of the BTRFS Subvolume.
-	if err := os.MkdirAll(filepath.Dir(cPath), 0755); err != nil {
-		return err
+	// COMMENT(brauner): Check if the storage volume is already mounted.
+	customMntPoint := getStoragePoolVolumeMountPoint(s.pool.PoolName, s.volume.VolumeName)
+	if shared.IsMountPoint(customMntPoint) {
+		return nil, false
 	}
 
-	// Create the BTRFS Subvolume
-	err := s.btrfsPoolVolumeCreate(cPath)
+	// COMMENT(brauner): Mount the storage volume on its mountpoint.
+	customMntOptions := ""
+	if !shared.IsBlockdevPath(source) {
+		// mount("/dev/loop<n>", "/path/to/target", "btrfs", 0, "subvol=subvol/name")
+		fd, loopDev := prepareLoopDev(source)
+		if fd < 0 {
+			return fmt.Errorf("Could not prepare loop device."), false
+		}
+		file := os.NewFile(uintptr(fd), "")
+		defer file.Close()
+
+		// COMMENT(brauner): Pass the btrfs subvolume name as
+		// mountoption.
+		customMntOptions = s.getCustomMntOptions()
+		err := syscall.Mount(loopDev, customMntPoint, "btrfs", 0, customMntOptions)
+		if err != nil {
+			return err, false
+		}
+	} else {
+		err := syscall.Mount(source, customMntPoint, "btrfs", 0, customMntOptions)
+		if err != nil {
+			return err, false
+		}
+	}
+
+	return nil, true
+}
+
+func (s *storageBtrfs) StoragePoolVolumeUmount() (error, bool) {
+	customMntPoint := getStoragePoolVolumeMountPoint(s.pool.PoolName, s.volume.VolumeName)
+	if shared.IsMountPoint(customMntPoint) {
+		err := syscall.Unmount(customMntPoint, 0)
+		if err != nil {
+			return err, false
+		}
+	}
+
+	return nil, true
+}
+
+func (s *storageBtrfs) StoragePoolVolumeUpdate(changedConfig []string) error {
+	return nil
+}
+
+func (s *storageBtrfs) GetStoragePoolVolumeWritable() api.StorageVolumePut {
+	return s.volume.Writable()
+}
+
+func (s *storageBtrfs) SetStoragePoolVolumeWritable(writable *api.StorageVolumePut) {
+	s.volume.StorageVolumePut = *writable
+}
+
+// Functions dealing with container storage.
+func (s *storageBtrfs) ContainerCreate(container container) error {
+	err, _ := s.StoragePoolMount()
 	if err != nil {
 		return err
 	}
 
-	if container.IsPrivileged() {
-		if err := os.Chmod(cPath, 0700); err != nil {
+	// COMMENT(brauner): We can only create the btrfs subvolume under the
+	// mounted storage pool. The on-disk layout for containers on a btrfs
+	// storage pool will thus be
+	// ${LXD_DIR}/storage-pools/<pool_name>/containers/. The btrfs tool will
+	// complain if the intermediate path does not exist, so create it if it
+	// doesn't already.
+	containerSubvolumePath := s.getContainerSubvolumePath(s.pool.PoolName)
+	if !shared.PathExists(containerSubvolumePath) {
+		err := os.MkdirAll(containerSubvolumePath, 0711)
+		if err != nil {
 			return err
 		}
 	}
 
+	// COMMENT(brauner): Create empty subvolume for container.
+	containerSubvolumeName := getContainerMountPoint(s.pool.PoolName, container.Name())
+	err = s.btrfsPoolVolumeCreate(containerSubvolumeName)
+	if err != nil {
+		return err
+	}
+
+	// COMMENT(brauner): Create the mountpoint for the container at:
+	// ${LXD_DIR}/containers/<name>
+	err = createContainerMountpoint(containerSubvolumeName, container.Path(), container.IsPrivileged())
+	if err != nil {
+		return err
+	}
+
 	return container.TemplateApply("create")
 }
 
-func (s *storageBtrfs) ContainerCreateFromImage(
-	container container, imageFingerprint string) error {
+// COMMENT(brauner): And this function is why I started hating on btrfs...
+func (s *storageBtrfs) ContainerCreateFromImage(container container, imageFingerprint string) error {
+	source := s.pool.PoolConfig["source"]
+	if source == "" {
+		return fmt.Errorf("No \"source\" property found for the storage pool.")
+	}
 
-	imageSubvol := fmt.Sprintf(
-		"%s.btrfs",
-		shared.VarPath("images", imageFingerprint))
+	err, _ := s.StoragePoolMount()
+	if err != nil {
+		return err
+	}
+
+	// COMMENT(brauner): We can only create the btrfs subvolume under the
+	// mounted storage pool. The on-disk layout for containers on a btrfs
+	// storage pool will thus be
+	// ${LXD_DIR}/storage-pools/<pool_name>/containers/. The btrfs tool will
+	// complain if the intermediate path does not exist, so create it if it
+	// doesn't already.
+	containerSubvolumePath := s.getContainerSubvolumePath(s.pool.PoolName)
+	if !shared.PathExists(containerSubvolumePath) {
+		err := os.MkdirAll(containerSubvolumePath, 0711)
+		if err != nil {
+			return err
+		}
+	}
+
+	// COMMENT(brauner): Mountpoint of the image:
+	// ${LXD_DIR}/images/<fingerprint>
+	mntErrno, _ := s.ImageMount(imageFingerprint)
+	if mntErrno != nil {
+		if mntErrno != syscall.ENOENT {
+			return mntErrno
+		}
 
-	// Create the btrfs subvol of the image first if it doesn exists.
-	if !shared.PathExists(imageSubvol) {
-		if err := s.ImageCreate(imageFingerprint); err != nil {
+		err := s.ImageCreate(imageFingerprint)
+		if err != nil {
 			return err
 		}
 	}
+	s.ImageUmount(imageFingerprint)
 
-	// Now make a snapshot of the image subvol
-	err := s.btrfsPoolVolumesSnapshot(imageSubvol, container.Path(), false)
+	// COMMENT(brauner): Create a rw snapshot at
+	// ${LXD_DIR}/storage-pools/<pool_name>/containers/<name>
+	// from the mounted ro image snapshot mounted at
+	// ${LXD_DIR}/storage-pools/<pool_name>/images/<fingerprint>
+	imageMntPoint := getImageMountPoint(s.pool.PoolName, imageFingerprint)
+	containerSubvolumeName := getContainerMountPoint(s.pool.PoolName, container.Name())
+	err = s.btrfsPoolVolumesSnapshot(imageMntPoint, containerSubvolumeName, false)
+	if err != nil {
+		return err
+	}
+
+	// COMMENT(brauner): Create the mountpoint for the container at:
+	// ${LXD_DIR}/containers/<name>
+	err = createContainerMountpoint(containerSubvolumeName, container.Path(), container.IsPrivileged())
 	if err != nil {
 		return err
 	}
@@ -309,10 +571,6 @@ func (s *storageBtrfs) ContainerCreateFromImage(
 			s.ContainerDelete(container)
 			return err
 		}
-	} else {
-		if err := os.Chmod(container.Path(), 0700); err != nil {
-			return err
-		}
 	}
 
 	return container.TemplateApply("create")
@@ -323,21 +581,41 @@ func (s *storageBtrfs) ContainerCanRestore(container container, sourceContainer
 }
 
 func (s *storageBtrfs) ContainerDelete(container container) error {
-	cPath := container.Path()
+	// COMMENT(brauner): The storage pool needs to be mounted.
+	err, _ := s.StoragePoolMount()
+	if err != nil {
+		return err
+	}
+
+	// COMMENT(brauner): Delete the subvolume.
+	containerSubvolumeName := getContainerMountPoint(s.pool.PoolName, container.Name())
+	err = s.btrfsPoolVolumeDelete(containerSubvolumeName)
+	if err != nil {
+		return err
+	}
+
+	// COMMENT(brauner): Delete the container's symlink to the subvolume.
+	err = deleteContainerMountpoint(containerSubvolumeName, container.Path(), s.GetStorageTypeName())
+	if err != nil {
+		return err
+	}
 
-	// First remove the subvol (if it was one).
-	if s.isBtrfsPoolVolume(cPath) {
-		if err := s.btrfsPoolVolumesDelete(cPath); err != nil {
+	// COMMENT(brauner): Delete potential snapshot mountpoints.
+	snapshotMntPoint := getSnapshotMountPoint(s.pool.PoolName, container.Name())
+	if shared.PathExists(snapshotMntPoint) {
+		err := os.RemoveAll(snapshotMntPoint)
+		if err != nil {
 			return err
 		}
 	}
 
-	// Then the directory (if it still exists).
-	if shared.PathExists(cPath) {
-		err := os.RemoveAll(cPath)
+	// COMMENT(brauner): Delete potential symlink
+	// ${LXD_DIR}/snapshots/<container_name> -> ${POOL}/snapshots/<container_name>
+	snapshotSymlink := shared.VarPath("snapshots", container.Name())
+	if shared.PathExists(snapshotSymlink) {
+		err := os.Remove(snapshotSymlink)
 		if err != nil {
-			s.log.Error("ContainerDelete: failed", log.Ctx{"cPath": cPath, "err": err})
-			return fmt.Errorf("Error cleaning up %s: %s", cPath, err)
+			return err
 		}
 	}
 
@@ -345,36 +623,73 @@ func (s *storageBtrfs) ContainerDelete(container container) error {
 }
 
 func (s *storageBtrfs) ContainerCopy(container container, sourceContainer container) error {
-	subvol := sourceContainer.Path()
-	dpath := container.Path()
+	// COMMENT(brauner): The storage pool needs to be mounted.
+	err, _ := s.StoragePoolMount()
+	if err != nil {
+		return err
+	}
 
-	if s.isBtrfsPoolVolume(subvol) {
-		// Snapshot the sourcecontainer
-		err := s.btrfsPoolVolumesSnapshot(subvol, dpath, false)
+	sourcePool := sourceContainer.Storage().ContainerPoolGet()
+	sourceContainerSubvolumeName := getContainerMountPoint(sourcePool, sourceContainer.Name())
+	targetContainerSubvolumeName := getContainerMountPoint(s.pool.PoolName, container.Name())
+	if s.ContainerPoolGet() == sourcePool {
+		// COMMENT(brauner): They are on the same storage pool which
+		// means they both use btrfs. So we can simply create a new
+		// snapshot of the source container. For this we only mount the
+		// btrfs storage pool and snapshot the subvolume names. No
+		// f*cking around with mounting the containers.
+		err, ourMount := s.StoragePoolMount()
 		if err != nil {
 			return err
 		}
+		if ourMount {
+			defer s.StoragePoolUmount()
+		}
+
+		err = s.btrfsPoolVolumesSnapshot(sourceContainerSubvolumeName, targetContainerSubvolumeName, false)
+		if err != nil {
+			return err
+		}
+
+		err = createContainerMountpoint(targetContainerSubvolumeName, container.Path(), container.IsPrivileged())
+		if err != nil {
+			return err
+		}
+
+		// COMMENT(brauner): Make sure that template apply finds the
+		// container mounted.
+		err, ourMount = s.ContainerMount(container.Name(), container.Path())
+		if err != nil {
+			return err
+		}
+		if ourMount {
+			defer s.ContainerUmount(container.Name(), container.Path())
+		}
 	} else {
-		// Create the BTRFS Container.
-		if err := s.ContainerCreate(container); err != nil {
+		// COMMENT(brauner): Create an empty btrfs storage volume.
+		err = s.ContainerCreate(container)
+		if err != nil {
 			return err
 		}
 
-		/*
-		 * Copy by using rsync
-		 */
-		output, err := storageRsyncCopy(
-			sourceContainer.Path(),
-			container.Path())
+		// COMMENT(brauner): Mount the source container since we do need it for
+		// acl permissions further down.
+		err := sourceContainer.StorageStart()
 		if err != nil {
-			s.ContainerDelete(container)
+			return err
+		}
 
+		// COMMENT(brauner): Use rsync to fill the empty volume.
+		output, err := storageRsyncCopy(sourceContainerSubvolumeName, targetContainerSubvolumeName)
+		if err != nil {
+			s.ContainerDelete(container)
 			s.log.Error("ContainerCopy: rsync failed", log.Ctx{"output": string(output)})
 			return fmt.Errorf("rsync failed: %s", string(output))
 		}
 	}
 
-	if err := s.setUnprivUserAcl(sourceContainer, dpath); err != nil {
+	err = s.setUnprivUserAcl(sourceContainer, targetContainerSubvolumeName)
+	if err != nil {
 		s.ContainerDelete(container)
 		return err
 	}
@@ -383,6 +698,12 @@ func (s *storageBtrfs) ContainerCopy(container container, sourceContainer contai
 }
 
 func (s *storageBtrfs) ContainerMount(name string, path string) (error, bool) {
+	// COMMENT(brauner): The storage pool must be mounted.
+	err, _ := s.StoragePoolMount()
+	if err != nil {
+		return err, false
+	}
+
 	return nil, true
 }
 
@@ -391,16 +712,43 @@ func (s *storageBtrfs) ContainerUmount(name string, path string) (error, bool) {
 }
 
 func (s *storageBtrfs) ContainerRename(container container, newName string) error {
-	oldName := container.Name()
-	oldPath := container.Path()
-	newPath := containerPath(newName, false)
+	// COMMENT(brauner): The storage pool must be mounted.
+	err, _ := s.StoragePoolMount()
+	if err != nil {
+		return err
+	}
 
-	if err := os.Rename(oldPath, newPath); err != nil {
+	oldContainerSubvolumeName := getContainerMountPoint(s.pool.PoolName, container.Name())
+	newContainerSubvolumeName := getContainerMountPoint(s.pool.PoolName, newName)
+	err = os.Rename(oldContainerSubvolumeName, newContainerSubvolumeName)
+	if err != nil {
 		return err
 	}
 
-	if shared.PathExists(shared.VarPath(fmt.Sprintf("snapshots/%s", oldName))) {
-		err := os.Rename(shared.VarPath(fmt.Sprintf("snapshots/%s", oldName)), shared.VarPath(fmt.Sprintf("snapshots/%s", newName)))
+	newSymlink := shared.VarPath("containers", newName)
+	err = renameContainerMountpoint(oldContainerSubvolumeName, container.Path(), newContainerSubvolumeName, newSymlink)
+	if err != nil {
+		return err
+	}
+
+	oldSnapshotSubvolumeName := getSnapshotMountPoint(s.pool.PoolName, container.Name())
+	newSnapshotSubvolumeName := getSnapshotMountPoint(s.pool.PoolName, newName)
+	if shared.PathExists(oldSnapshotSubvolumeName) {
+		err = os.Rename(oldSnapshotSubvolumeName, newSnapshotSubvolumeName)
+		if err != nil {
+			return err
+		}
+	}
+
+	oldSnapshotSymlink := shared.VarPath("snapshots", container.Name())
+	newSnapshotSymlink := shared.VarPath("snapshots", newName)
+	if shared.PathExists(oldSnapshotSymlink) {
+		err := os.Remove(oldSnapshotSymlink)
+		if err != nil {
+			return err
+		}
+
+		err = os.Symlink(newSnapshotSubvolumeName, newSnapshotSymlink)
 		if err != nil {
 			return err
 		}
@@ -409,38 +757,59 @@ func (s *storageBtrfs) ContainerRename(container container, newName string) erro
 	return nil
 }
 
-func (s *storageBtrfs) ContainerRestore(
-	container container, sourceContainer container) error {
-
-	targetSubVol := container.Path()
-	sourceSubVol := sourceContainer.Path()
-	sourceBackupPath := container.Path() + ".back"
+func (s *storageBtrfs) ContainerRestore(container container, sourceContainer container) error {
+	// COMMENT(brauner): The storage pool must be mounted.
+	err, _ := s.StoragePoolMount()
+	if err != nil {
+		return err
+	}
 
-	// Create a backup of the container
-	err := os.Rename(container.Path(), sourceBackupPath)
+	// COMMENT(brauner): Create a backup so we can revert.
+	targetContainerSubvolumeName := getContainerMountPoint(s.pool.PoolName, container.Name())
+	backupTargetContainerSubvolumeName := fmt.Sprintf("%s.back", targetContainerSubvolumeName)
+	err = os.Rename(targetContainerSubvolumeName, backupTargetContainerSubvolumeName)
 	if err != nil {
 		return err
 	}
+	undo := true
+	defer func() {
+		if undo {
+			os.Rename(backupTargetContainerSubvolumeName, targetContainerSubvolumeName)
+		}
+	}()
+
+	// COMMENT(brauner): Mount the source container.
+	srcContainerStorage := sourceContainer.Storage()
+	sourcePool := srcContainerStorage.ContainerPoolGet()
+	sourceContainerSubvolumeName := ""
+	if sourceContainer.IsSnapshot() {
+		sourceContainerSubvolumeName = getSnapshotMountPoint(sourcePool, sourceContainer.Name())
+		err = srcContainerStorage.ContainerSnapshotStart(sourceContainer)
+		if err != nil {
+			return err
+		}
+		defer srcContainerStorage.ContainerSnapshotStop(sourceContainer)
+	} else {
+		sourceContainerSubvolumeName = getContainerMountPoint(sourcePool, sourceContainer.Name())
+	}
 
 	var failure error
-	if s.isBtrfsPoolVolume(sourceSubVol) {
-		// Restore using btrfs snapshots.
-		err := s.btrfsPoolVolumesSnapshot(sourceSubVol, targetSubVol, false)
+	if s.ContainerPoolGet() == sourcePool {
+		// COMMENT(brauner): They are on the same storage pool, so we
+		// can simply snapshot.
+		err := s.btrfsPoolVolumesSnapshot(sourceContainerSubvolumeName, targetContainerSubvolumeName, false)
 		if err != nil {
 			failure = err
 		}
 	} else {
-		// Restore using rsync but create a btrfs subvol.
-		if err := s.btrfsPoolVolumeCreate(targetSubVol); err == nil {
-			output, err := storageRsyncCopy(
-				sourceSubVol,
-				targetSubVol)
-
+		err := s.btrfsPoolVolumeCreate(targetContainerSubvolumeName)
+		if err == nil {
+			// COMMENT(brauner): Use rsync to fill the empty volume.
+			// Sync by using the subvolume name.
+			output, err := storageRsyncCopy(sourceContainerSubvolumeName, targetContainerSubvolumeName)
 			if err != nil {
-				s.log.Error(
-					"ContainerRestore: rsync failed",
-					log.Ctx{"output": string(output)})
-
+				s.ContainerDelete(container)
+				s.log.Error("ContainerRestore: rsync failed", log.Ctx{"output": string(output)})
 				failure = err
 			}
 		} else {
@@ -449,20 +818,20 @@ func (s *storageBtrfs) ContainerRestore(
 	}
 
 	// Now allow unprivileged users to access its data.
-	if err := s.setUnprivUserAcl(sourceContainer, targetSubVol); err != nil {
+	err = s.setUnprivUserAcl(sourceContainer, targetContainerSubvolumeName)
+	if err != nil {
 		failure = err
 	}
 
-	if failure != nil {
-		// Restore original container
-		s.ContainerDelete(container)
-		os.Rename(sourceBackupPath, container.Path())
-	} else {
-		// Remove the backup, we made
-		if s.isBtrfsPoolVolume(sourceBackupPath) {
-			return s.btrfsPoolVolumesDelete(sourceBackupPath)
+	if failure == nil {
+		undo = false
+
+		if s.ContainerPoolGet() == srcContainerStorage.ContainerPoolGet() {
+			// Remove the backup we made.
+			return s.btrfsPoolVolumesDelete(backupTargetContainerSubvolumeName)
 		}
-		os.RemoveAll(sourceBackupPath)
+		os.RemoveAll(backupTargetContainerSubvolumeName)
 	}
 
 	return failure
@@ -494,65 +863,91 @@ func (s *storageBtrfs) ContainerGetUsage(container container) (int64, error) {
 	return s.btrfsPoolVolumeQGroupUsage(container.Path())
 }
 
-func (s *storageBtrfs) ContainerSnapshotCreate(
-	snapshotContainer container, sourceContainer container) error {
-
-	subvol := sourceContainer.Path()
-	dpath := snapshotContainer.Path()
+func (s *storageBtrfs) ContainerSnapshotCreate(snapshotContainer container, sourceContainer container) error {
+	err, _ := s.StoragePoolMount()
+	if err != nil {
+		return err
+	}
 
-	if s.isBtrfsPoolVolume(subvol) {
-		// Create a readonly snapshot of the source.
-		err := s.btrfsPoolVolumesSnapshot(subvol, dpath, true)
+	// COMMENT(brauner): We can only create the btrfs subvolume under the
+	// mounted storage pool. The on-disk layout for snapshots on a btrfs
+	// storage pool will thus be
+	// ${LXD_DIR}/storage-pools/<pool_name>/snapshots/. The btrfs tool will
+	// complain if the intermediate path does not exist, so create it if it
+	// doesn't already.
+	snapshotSubvolumePath := s.getSnapshotSubvolumePath(s.pool.PoolName, sourceContainer.Name())
+	if !shared.PathExists(snapshotSubvolumePath) {
+		err := os.MkdirAll(snapshotSubvolumePath, 0711)
 		if err != nil {
-			s.ContainerSnapshotDelete(snapshotContainer)
 			return err
 		}
-	} else {
-		/*
-		 * Copy by using rsync
-		 */
-		output, err := storageRsyncCopy(
-			subvol,
-			dpath)
-		if err != nil {
-			s.ContainerSnapshotDelete(snapshotContainer)
+	}
 
-			s.log.Error(
-				"ContainerSnapshotCreate: rsync failed",
-				log.Ctx{"output": string(output)})
-			return fmt.Errorf("rsync failed: %s", string(output))
+	snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools", s.pool.PoolName, "snapshots", sourceContainer.Name())
+	snapshotMntPointSymlink := shared.VarPath("snapshots", sourceContainer.Name())
+	if !shared.PathExists(snapshotMntPointSymlink) {
+		err := createContainerMountpoint(snapshotMntPointSymlinkTarget, snapshotMntPointSymlink, snapshotContainer.IsPrivileged())
+		if err != nil {
+			return err
 		}
 	}
 
+	srcContainerSubvolumeName := getContainerMountPoint(s.pool.PoolName, sourceContainer.Name())
+	snapshotSubvolumeName := getSnapshotMountPoint(s.pool.PoolName, snapshotContainer.Name())
+	err = s.btrfsPoolVolumesSnapshot(srcContainerSubvolumeName, snapshotSubvolumeName, true)
+	if err != nil {
+		return err
+	}
+
 	return nil
 }
 
-func (s *storageBtrfs) ContainerSnapshotDelete(
-	snapshotContainer container) error {
+func (s *storageBtrfs) ContainerSnapshotDelete(snapshotContainer container) error {
+	err, _ := s.StoragePoolMount()
+	if err != nil {
+		return err
+	}
 
-	err := s.ContainerDelete(snapshotContainer)
+	snapshotSubvolumeName := getSnapshotMountPoint(s.pool.PoolName, snapshotContainer.Name())
+	err = s.btrfsPoolVolumeDelete(snapshotSubvolumeName)
 	if err != nil {
-		return fmt.Errorf("Error deleting snapshot %s: %s", snapshotContainer.Name(), err)
+		return err
 	}
 
-	oldPathParent := filepath.Dir(snapshotContainer.Path())
-	if ok, _ := shared.PathIsEmpty(oldPathParent); ok {
-		os.Remove(oldPathParent)
+	sourceSnapshotMntPoint := shared.VarPath("snapshots", snapshotContainer.Name())
+	os.Remove(sourceSnapshotMntPoint)
+	os.Remove(snapshotSubvolumeName)
+
+	sourceFields := strings.SplitN(snapshotContainer.Name(), shared.SnapshotDelimiter, 2)
+	sourceName := sourceFields[0]
+	snapshotSubvolumePath := s.getSnapshotSubvolumePath(s.pool.PoolName, sourceName)
+	os.Remove(snapshotSubvolumePath)
+	if !shared.PathExists(snapshotSubvolumePath) {
+		snapshotMntPointSymlink := shared.VarPath("snapshots", sourceName)
+		os.Remove(snapshotMntPointSymlink)
 	}
+
 	return nil
 }
 
 func (s *storageBtrfs) ContainerSnapshotStart(container container) error {
-	if shared.PathExists(container.Path() + ".ro") {
+	err, _ := s.StoragePoolMount()
+	if err != nil {
+		return err
+	}
+
+	snapshotSubvolumeName := getSnapshotMountPoint(s.pool.PoolName, container.Name())
+	roSnapshotSubvolumeName := fmt.Sprintf("%s.ro", snapshotSubvolumeName)
+	if shared.PathExists(roSnapshotSubvolumeName) {
 		return fmt.Errorf("The snapshot is already mounted read-write.")
 	}
 
-	err := os.Rename(container.Path(), container.Path()+".ro")
+	err = os.Rename(snapshotSubvolumeName, roSnapshotSubvolumeName)
 	if err != nil {
 		return err
 	}
 
-	err = s.btrfsPoolVolumesSnapshot(container.Path()+".ro", container.Path(), false)
+	err = s.btrfsPoolVolumesSnapshot(roSnapshotSubvolumeName, snapshotSubvolumeName, false)
 	if err != nil {
 		return err
 	}
@@ -561,16 +956,23 @@ func (s *storageBtrfs) ContainerSnapshotStart(container container) error {
 }
 
 func (s *storageBtrfs) ContainerSnapshotStop(container container) error {
-	if !shared.PathExists(container.Path() + ".ro") {
+	err, _ := s.StoragePoolMount()
+	if err != nil {
+		return err
+	}
+
+	snapshotSubvolumeName := getSnapshotMountPoint(s.pool.PoolName, container.Name())
+	roSnapshotSubvolumeName := fmt.Sprintf("%s.ro", snapshotSubvolumeName)
+	if !shared.PathExists(roSnapshotSubvolumeName) {
 		return fmt.Errorf("The snapshot isn't currently mounted read-write.")
 	}
 
-	err := s.btrfsPoolVolumesDelete(container.Path())
+	err = s.btrfsPoolVolumesDelete(snapshotSubvolumeName)
 	if err != nil {
 		return err
 	}
 
-	err = os.Rename(container.Path()+".ro", container.Path())
+	err = os.Rename(roSnapshotSubvolumeName, snapshotSubvolumeName)
 	if err != nil {
 		return err
 	}
@@ -579,42 +981,64 @@ func (s *storageBtrfs) ContainerSnapshotStop(container container) error {
 }
 
 // ContainerSnapshotRename renames a snapshot of a container.
-func (s *storageBtrfs) ContainerSnapshotRename(
-	snapshotContainer container, newName string) error {
+func (s *storageBtrfs) ContainerSnapshotRename(snapshotContainer container, newName string) error {
+	// COMMENT(brauner): The storage pool must be mounted.
+	err, _ := s.StoragePoolMount()
+	if err != nil {
+		return err
+	}
 
-	oldPath := snapshotContainer.Path()
-	newPath := containerPath(newName, true)
+	// COMMENT(brauner): Rename the subvolume on the storage pool. The
+	// snapshot must not be mounted anymore at this point, otherwise the
+	// rename will fail with EBUSY.
+	oldSnapshotSubvolumeName := getSnapshotMountPoint(s.pool.PoolName, snapshotContainer.Name())
+	newSnapshotSubvolumeName := getSnapshotMountPoint(s.pool.PoolName, newName)
+	err = os.Rename(oldSnapshotSubvolumeName, newSnapshotSubvolumeName)
+	if err != nil {
+		return err
+	}
 
-	// Create the new parent.
-	if !shared.PathExists(filepath.Dir(newPath)) {
-		os.MkdirAll(filepath.Dir(newPath), 0700)
+	return nil
+}
+
+// COMMENT(brauner): Needed for live migration where an empty snapshot needs to
+// be created before rsyncing into it.
+func (s *storageBtrfs) ContainerSnapshotCreateEmpty(snapshotContainer container) error {
+	// COMMENT(brauner): Mount the storage pool.
+	err, _ := s.StoragePoolMount()
+	if err != nil {
+		return err
 	}
 
-	// Now rename the snapshot.
-	if !s.isBtrfsPoolVolume(oldPath) {
-		if err := os.Rename(oldPath, newPath); err != nil {
-			return err
-		}
-	} else {
-		if err := s.btrfsPoolVolumesSnapshot(oldPath, newPath, true); err != nil {
-			return err
-		}
-		if err := s.btrfsPoolVolumesDelete(oldPath); err != nil {
+	// COMMENT(brauner): Create the snapshot subvolume path on the
+	// storage pool.
+	snapshotSubvolumePath := s.getSnapshotSubvolumePath(s.pool.PoolName, snapshotContainer.Name())
+	snapshotSubvolumeName := getSnapshotMountPoint(s.pool.PoolName, snapshotContainer.Name())
+	if !shared.PathExists(snapshotSubvolumePath) {
+		err := os.MkdirAll(snapshotSubvolumePath, 0711)
+		if err != nil {
 			return err
 		}
 	}
 
-	// Remove the old parent (on container rename) if its empty.
-	if ok, _ := shared.PathIsEmpty(filepath.Dir(oldPath)); ok {
-		os.Remove(filepath.Dir(oldPath))
+	err = s.btrfsPoolVolumeCreate(snapshotSubvolumeName)
+	if err != nil {
+		return err
 	}
 
-	return nil
-}
+	sourceFields := strings.SplitN(snapshotContainer.Name(), shared.SnapshotDelimiter, 2)
+	sourceName := sourceFields[0]
+	snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools", s.pool.PoolName, "snapshots", sourceName)
+	snapshotMntPointSymlink := shared.VarPath("snapshots", sourceName)
+	if !shared.PathExists(snapshotMntPointSymlink) {
+		err := createContainerMountpoint(snapshotMntPointSymlinkTarget, snapshotMntPointSymlink, snapshotContainer.IsPrivileged())
+		if err != nil {
+			return err
+		}
+	}
 
-func (s *storageBtrfs) ContainerSnapshotCreateEmpty(snapshotContainer container) error {
-	dpath := snapshotContainer.Path()
-	return s.btrfsPoolVolumeCreate(dpath)
+	// COMMENT(brauner): The empty snapshot subvolume was already
+	// created above, so we are done.
+	return nil
 }
 
 func (s *storageBtrfs) ImageCreate(fingerprint string) error {
@@ -624,19 +1048,10 @@ func (s *storageBtrfs) ImageCreate(fingerprint string) error {
 		return fmt.Errorf("No \"source\" property found for the storage pool.")
 	}
 
-	poolMntPoint := filepath.Join(shared.VarPath("storage-pools"), s.pool.PoolName)
-
-	// COMMENT(brauner): If the storage pool is currently unmounted, mount
-	// it. The btrfs storage pool will be mounted at
-	// ${LXD_DIR}/storage-pools/<pool_name>.
-	if !shared.IsMountPoint(poolMntPoint) {
-		err, _ := s.StoragePoolMount()
-		if err != nil {
-			return err
-		}
+	err, _ := s.StoragePoolMount()
+	if err != nil {
+		return err
 	}
-	// COMMENT(brauner): Unmount the pool again.
-	defer s.StoragePoolUmount()
 
 	// COMMENT(brauner): We can only create the btrfs subvolume under the
 	// mounted storage pool. The on-disk layout for images on a btrfs
@@ -644,9 +1059,9 @@ func (s *storageBtrfs) ImageCreate(fingerprint string) error {
 	// ${LXD_DIR}/storage-pools/<pool_name>/images/. The btrfs tool will
 	// complain if the intermediate path does not exist, so create it if it
 	// doesn't already.
-	imageSubvolPath := filepath.Join(poolMntPoint, "images")
-	if !shared.PathExists(imageSubvolPath) {
-		err := os.MkdirAll(imageSubvolPath, 0700)
+	imageSubvolumePath := s.getImageSubvolumePath(s.pool.PoolName)
+	if !shared.PathExists(imageSubvolumePath) {
+		err := os.MkdirAll(imageSubvolumePath, 0700)
 		if err != nil {
 			return err
 		}
@@ -655,73 +1070,43 @@ func (s *storageBtrfs) ImageCreate(fingerprint string) error {
 	// COMMENT(brauner): Create a temporary rw btrfs subvolume. From this rw
 	// subvolume we'll create a ro snapshot below. The path with which we do
 	// this is
-	// ${LXD_DIR}/storage-pools/<pool_name>/images/<fingerprint_tmp>.
-	subvolTmpPath := filepath.Join(imageSubvolPath, fmt.Sprintf("%s_tmp", fingerprint))
-	if err := s.btrfsPoolVolumeCreate(subvolTmpPath); err != nil {
+	// ${LXD_DIR}/storage-pools/<pool_name>/images/<fingerprint>_tmp.
+	imageSubvolumeName := getImageMountPoint(s.pool.PoolName, fingerprint)
+	tmpImageSubvolumeName := fmt.Sprintf("%s_tmp", imageSubvolumeName)
+	err = s.btrfsPoolVolumeCreate(tmpImageSubvolumeName)
+	if err != nil {
 		return err
 	}
 	// COMMENT(brauner): Delete volume on error.
 	undo := true
 	defer func() {
 		if undo {
-			s.btrfsPoolVolumeDelete(subvolTmpPath)
+			s.btrfsPoolVolumeDelete(tmpImageSubvolumeName)
 		}
 	}()
 
-	// COMMENT(brauner): Now create the mountpoint for the image:
-	// ${LXD_DIR}/images/<fingerprint>@<pool_name>.
-	imageMntPoint := filepath.Join(shared.VarPath("images"), fmt.Sprintf("%s@%s", fingerprint, s.pool.PoolName))
-	err := os.MkdirAll(imageMntPoint, 0700)
-	if err != nil {
-		return err
-	}
-	// COMMENT(brauner): Delete mountpoint on error, otherwise later image
-	// creations will fail (naggy btrfs).
-	defer func() {
-		if undo {
-			os.Remove(imageMntPoint)
-		}
-	}()
-
-	// COMMENT(brauner): Mount the storage volume for the image on its
-	// mountpoint.
-	// mount("/dev/loop<n>", "/path/to/target", "btrfs", 0, "subvol=subvol/name")
-	fd, loopDev := prepareLoopDev(source)
-	if fd < 0 {
-		return fmt.Errorf("Could not prepare loop device.")
-	}
-	file := os.NewFile(uintptr(fd), "")
-	defer file.Close()
-
-	subvol := fmt.Sprintf("subvol=images/%s_tmp", fingerprint)
-	err = tryMount(loopDev, imageMntPoint, "btrfs", 0, subvol)
-	if err != nil {
-		return err
-	}
-	// COMMENT(brauner): Unmount after we are done with the image.
-	defer tryUnmount(imageMntPoint, 0)
-
 	// COMMENT(brauner): Unpack the image in imageMntPoint.
 	imagePath := shared.VarPath("images", fingerprint)
-	if err := unpackImage(s.d, imagePath, imageMntPoint, storageTypeBtrfs); err != nil {
+	err = unpackImage(s.d, imagePath, tmpImageSubvolumeName, storageTypeBtrfs)
+	if err != nil {
 		return err
 	}
 
 	// COMMENT(brauner): Now create a read-only snapshot of the subvolume.
 	// The path with which we do this is
-	// ${LXD_DIR}/storage-pools/<pool_name>/images/<fingerprint@readonly>.
-	subvolSnapshotPath := filepath.Join(imageSubvolPath, fingerprint)
-	if err := s.btrfsPoolVolumeSnapshot(subvolTmpPath, subvolSnapshotPath, true); err != nil {
+	// ${LXD_DIR}/storage-pools/<pool_name>/images/<fingerprint>.
+	err = s.btrfsPoolVolumeSnapshot(tmpImageSubvolumeName, imageSubvolumeName, true)
+	if err != nil {
 		return err
 	}
 
 	defer func() {
 		if undo {
-			s.btrfsPoolVolumeDelete(subvolSnapshotPath)
+			s.btrfsPoolVolumeDelete(imageSubvolumeName)
 		}
 	}()
 
-	err = s.btrfsPoolVolumeDelete(subvolTmpPath)
+	err = s.btrfsPoolVolumeDelete(tmpImageSubvolumeName)
 	if err != nil {
 		return err
 	}
@@ -732,47 +1117,38 @@ func (s *storageBtrfs) ImageCreate(fingerprint string) error {
 }
 
 func (s *storageBtrfs) ImageDelete(fingerprint string) error {
-	poolMntPoint := filepath.Join(shared.VarPath("storage-pools"), s.pool.PoolName)
-
-	// COMMENT(brauner): If the storage pool is currently unmounted, mount
-	// it. The btrfs storage pool will be mounted at
-	// ${LXD_DIR}/storage-pools/<pool_name>.
-	if !shared.IsMountPoint(poolMntPoint) {
-		err, _ := s.StoragePoolMount()
-		if err != nil {
-			return err
-		}
-	}
-	// COMMENT(brauner): Unmount the pool again.
-	defer s.StoragePoolUmount()
-
-	// COMMENT(brauner): Now check whether the image is mounted at:
-	// ${LXD_DIR}/images/<fingerprint>@<pool_name>. If so, unmount it.
-	imageMntPoint := filepath.Join(shared.VarPath("images"), fmt.Sprintf("%s@%s", fingerprint, s.pool.PoolName))
-	if shared.IsMountPoint(imageMntPoint) {
-		err := tryUnmount(imageMntPoint, 0)
-		if err != nil {
-			return err
-		}
+	err, _ := s.StoragePoolMount()
+	if err != nil {
+		return err
 	}
 
 	// COMMENT(brauner): Delete the btrfs subvolume. The path with which we
 	// do this is ${LXD_DIR}/storage-pools/<pool_name>/images/<fingerprint@readonly>.
-	imageSubvolPath := filepath.Join(poolMntPoint, "images")
-	subvolPath := filepath.Join(imageSubvolPath, fingerprint)
-	err := s.btrfsPoolVolumeDelete(subvolPath)
+	imageSubvolumeName := getImageMountPoint(s.pool.PoolName, fingerprint)
+	err = s.btrfsPoolVolumeDelete(imageSubvolumeName)
 	if err != nil {
 		return err
 	}
 
 	// COMMENT(brauner): Now delete the mountpoint for the image:
 	// ${LXD_DIR}/images/<fingerprint>@<pool_name>.
-	os.RemoveAll(imageMntPoint)
+	if shared.PathExists(imageSubvolumeName) {
+		err := os.RemoveAll(imageSubvolumeName)
+		if err != nil {
+			return err
+		}
+	}
 
 	return nil
 }
 
 func (s *storageBtrfs) ImageMount(fingerprint string) (error, bool) {
+	// COMMENT(brauner): The storage pool must be mounted.
+	err, _ := s.StoragePoolMount()
+	if err != nil {
+		return err, false
+	}
+
 	return nil, true
 }
 
@@ -783,7 +1159,7 @@ func (s *storageBtrfs) ImageUmount(fingerprint string) (error, bool) {
 func (s *storageBtrfs) btrfsPoolVolumeCreate(subvol string) error {
 	parentDestPath := filepath.Dir(subvol)
 	if !shared.PathExists(parentDestPath) {
-		if err := os.MkdirAll(parentDestPath, 0700); err != nil {
+		if err := os.MkdirAll(parentDestPath, 0711); err != nil {
 			return err
 		}
 	}
@@ -948,20 +1324,6 @@ func (s *storageBtrfs) btrfsPoolVolumesDelete(subvol string) error {
  */
 func (s *storageBtrfs) btrfsPoolVolumeSnapshot(
 	source string, dest string, readonly bool) error {
-
-	parentDestPath := filepath.Dir(dest)
-	if !shared.PathExists(parentDestPath) {
-		if err := os.MkdirAll(parentDestPath, 0700); err != nil {
-			return err
-		}
-	}
-
-	if shared.PathExists(dest) {
-		if err := os.Remove(dest); err != nil {
-			return err
-		}
-	}
-
 	var output []byte
 	var err error
 	if readonly {
@@ -996,9 +1358,7 @@ func (s *storageBtrfs) btrfsPoolVolumeSnapshot(
 	return err
 }
 
-func (s *storageBtrfs) btrfsPoolVolumesSnapshot(
-	source string, dest string, readonly bool) error {
-
+func (s *storageBtrfs) btrfsPoolVolumesSnapshot(source string, dest string, readonly bool) error {
 	// Get a list of subvolumes of the root
 	subsubvols, err := s.btrfsPoolVolumesGet(source)
 	if err != nil {
@@ -1026,7 +1386,6 @@ func (s *storageBtrfs) btrfsPoolVolumesSnapshot(
 			path.Join(source, subsubvol),
 			path.Join(dest, subsubvol),
 			readonly); err != nil {
-
 			return err
 		}
 	}
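
A note on the revert pattern used in ContainerRestore() and ImageCreate()
above: a boolean flag arms a deferred rollback, so every early error return
automatically restores the previous state, and the success path disarms it
by clearing the flag. Below is a minimal standalone sketch of the idiom;
the helper names are illustrative, not part of this patch:

package main

import (
	"fmt"
	"os"
)

// restoreWithBackup renames target aside, rebuilds it, and rolls the
// backup back into place if any step fails.
func restoreWithBackup(target string, rebuild func(string) error) error {
	backup := target + ".back"
	if err := os.Rename(target, backup); err != nil {
		return err
	}

	undo := true
	defer func() {
		if undo {
			// Any early return below still sees undo == true, so
			// the backup is moved back automatically.
			os.Rename(backup, target)
		}
	}()

	if err := rebuild(target); err != nil {
		return err // the deferred rollback fires
	}

	// Success: disarm the rollback and drop the backup.
	undo = false
	return os.RemoveAll(backup)
}

func main() {
	os.MkdirAll("/tmp/example", 0755)
	err := restoreWithBackup("/tmp/example", func(p string) error {
		return os.MkdirAll(p, 0755)
	})
	fmt.Println(err)
}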

From 34b4db74c046b8ccdb42d4895e66786281cf6e87 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Wed, 25 Jan 2017 13:12:47 +0100
Subject: [PATCH 46/63] storage: reimplement dir backend

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/storage_dir.go | 399 ++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 289 insertions(+), 110 deletions(-)

diff --git a/lxd/storage_dir.go b/lxd/storage_dir.go
index e17a251..24bd521 100644
--- a/lxd/storage_dir.go
+++ b/lxd/storage_dir.go
@@ -4,7 +4,6 @@ import (
 	"fmt"
 	"os"
 	"os/exec"
-	"path/filepath"
 	"strings"
 
 	"github.com/gorilla/websocket"
@@ -56,18 +55,49 @@ func (s *storageDir) StoragePoolCheck() error {
 }
 
 func (s *storageDir) StoragePoolCreate() error {
-	return nil
-}
+	source := s.pool.PoolConfig["source"]
+	if source == "" {
+		return fmt.Errorf("No \"source\" property found for the storage pool.")
+	}
+
+	err := os.MkdirAll(source, 0711)
+	if err != nil {
+		return err
+	}
+
+	prefix := shared.VarPath("storage-pools")
+	if !strings.HasPrefix(source, prefix) {
+		// The pool's source lies outside ${LXD_DIR}/storage-pools, so
+		// symlink ${LXD_DIR}/storage-pools/<pool_name> to it.
+		storagePoolSymlink := getStoragePoolMountPoint(s.pool.PoolName)
+		err := os.Symlink(source, storagePoolSymlink)
+		if err != nil {
+			return err
+		}
+	}
 
-func (s *storageDir) StoragePoolVolumeCreate() error {
 	return nil
 }
 
 func (s *storageDir) StoragePoolDelete() error {
-	return nil
-}
+	source := s.pool.PoolConfig["source"]
+	if source == "" {
+		return fmt.Errorf("No \"source\" property found for the storage pool.")
+	}
+
+	err := os.RemoveAll(source)
+	if err != nil {
+		return err
+	}
+
+	prefix := shared.VarPath("storage-pools")
+	if !strings.HasPrefix(source, prefix) {
+		storagePoolSymlink := getStoragePoolMountPoint(s.pool.PoolName)
+		err := os.Remove(storagePoolSymlink)
+		if err != nil {
+			return err
+		}
+	}
 
-func (s *storageDir) StoragePoolVolumeDelete() error {
 	return nil
 }
 
@@ -79,14 +109,6 @@ func (s *storageDir) StoragePoolUmount() (error, bool) {
 	return nil, true
 }
 
-func (s *storageDir) StoragePoolVolumeMount() (error, bool) {
-	return nil, true
-}
-
-func (s *storageDir) StoragePoolVolumeUmount() (error, bool) {
-	return nil, true
-}
-
 func (s *storageDir) GetStoragePoolWritable() api.StoragePoolPut {
 	return s.pool.Writable()
 }
@@ -115,47 +137,90 @@ func (s *storageDir) StoragePoolUpdate(changedConfig []string) error {
 	return nil
 }
 
+// COMMENT(brauner): Functions dealing with storage volumes.
+func (s *storageDir) StoragePoolVolumeCreate() error {
+	source := s.pool.PoolConfig["source"]
+	if source == "" {
+		return fmt.Errorf("No \"source\" property found for the storage pool.")
+	}
+
+	storageVolumePath := getStoragePoolVolumeMountPoint(s.pool.PoolName, s.volume.VolumeName)
+	err := os.MkdirAll(storageVolumePath, 0711)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (s *storageDir) StoragePoolVolumeDelete() error {
+	source := s.pool.PoolConfig["source"]
+	if source == "" {
+		return fmt.Errorf("No \"source\" property found for the storage pool.")
+	}
+
+	storageVolumePath := getStoragePoolVolumeMountPoint(s.pool.PoolName, s.volume.VolumeName)
+	err := os.RemoveAll(storageVolumePath)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (s *storageDir) StoragePoolVolumeMount() (error, bool) {
+	return nil, true
+}
+
+func (s *storageDir) StoragePoolVolumeUmount() (error, bool) {
+	return nil, true
+}
+
 func (s *storageDir) StoragePoolVolumeUpdate(changedConfig []string) error {
 	return nil
 }
 
 func (s *storageDir) ContainerCreate(container container) error {
-	cPath := container.Path()
-	if err := os.MkdirAll(cPath, 0755); err != nil {
-		return fmt.Errorf("Error creating containers directory")
+	source := s.pool.PoolConfig["source"]
+	if source == "" {
+		return fmt.Errorf("No \"source\" property found for the storage pool.")
 	}
 
-	if container.IsPrivileged() {
-		if err := os.Chmod(cPath, 0700); err != nil {
-			return err
-		}
+	privileged := container.IsPrivileged()
+	containerName := container.Name()
+	containerMntPoint := getContainerMountPoint(s.pool.PoolName, containerName)
+	err := createContainerMountpoint(containerMntPoint, container.Path(), privileged)
+	if err != nil {
+		return err
 	}
 
 	return container.TemplateApply("create")
 }
 
-func (s *storageDir) ContainerCreateFromImage(
-	container container, imageFingerprint string) error {
-
-	rootfsPath := container.RootfsPath()
-	if err := os.MkdirAll(rootfsPath, 0755); err != nil {
-		return fmt.Errorf("Error creating rootfs directory")
+func (s *storageDir) ContainerCreateFromImage(container container, imageFingerprint string) error {
+	source := s.pool.PoolConfig["source"]
+	if source == "" {
+		return fmt.Errorf("No \"source\" property found for the storage pool.")
 	}
 
-	if container.IsPrivileged() {
-		if err := os.Chmod(container.Path(), 0700); err != nil {
-			return err
-		}
+	privileged := container.IsPrivileged()
+	containerName := container.Name()
+	containerMntPoint := getContainerMountPoint(s.pool.PoolName, containerName)
+	err := createContainerMountpoint(containerMntPoint, container.Path(), privileged)
+	if err != nil {
+		return err
 	}
 
 	imagePath := shared.VarPath("images", imageFingerprint)
-	if err := unpackImage(s.d, imagePath, container.Path(), storageTypeDir); err != nil {
+	err = unpackImage(s.d, imagePath, containerMntPoint, storageTypeDir)
+	if err != nil {
 		s.ContainerDelete(container)
 		return err
 	}
 
-	if !container.IsPrivileged() {
-		if err := s.shiftRootfs(container); err != nil {
+	if !privileged {
+		err := s.shiftRootfs(container)
+		if err != nil {
 			s.ContainerDelete(container)
 			return err
 		}
@@ -169,42 +234,80 @@ func (s *storageDir) ContainerCanRestore(container container, sourceContainer co
 }
 
 func (s *storageDir) ContainerDelete(container container) error {
-	cPath := container.Path()
+	source := s.pool.PoolConfig["source"]
+	if source == "" {
+		return fmt.Errorf("No \"source\" property found for the storage pool.")
+	}
 
-	if !shared.PathExists(cPath) {
-		return nil
+	// COMMENT(brauner): Delete the container on its storage pool:
+	// ${POOL}/containers/<container_name>
+	containerName := container.Name()
+	containerMntPoint := getContainerMountPoint(s.pool.PoolName, containerName)
+	if shared.PathExists(containerMntPoint) {
+		err := os.RemoveAll(containerMntPoint)
+		if err != nil {
+			// RemoveAll fails on very long paths, so attempt an rm -Rf
+			output, err := exec.Command("rm", "-Rf", containerMntPoint).CombinedOutput()
+			if err != nil {
+				s.log.Error("ContainerDelete: failed", log.Ctx{"path": containerMntPoint, "output": output})
+				return fmt.Errorf("Error cleaning up %s: %s", containerMntPoint, string(output))
+			}
+		}
 	}
 
-	err := os.RemoveAll(cPath)
+	err := deleteContainerMountpoint(containerMntPoint, container.Path(), s.GetStorageTypeName())
 	if err != nil {
-		// RemovaAll fails on very long paths, so attempt an rm -Rf
-		output, err := exec.Command("rm", "-Rf", cPath).CombinedOutput()
+		return err
+	}
+
+	// COMMENT(brauner): Delete potential leftover snapshot mountpoints.
+	snapshotMntPoint := getSnapshotMountPoint(s.pool.PoolName, container.Name())
+	if shared.PathExists(snapshotMntPoint) {
+		err := os.RemoveAll(snapshotMntPoint)
 		if err != nil {
-			s.log.Error("ContainerDelete: failed", log.Ctx{"cPath": cPath, "output": output})
-			return fmt.Errorf("Error cleaning up %s: %s", cPath, string(output))
+			return err
+		}
+	}
+
+	// COMMENT(brauner): Delete potential leftover snapshot symlinks:
+	// ${LXD_DIR}/snapshots/<container_name> -> ${POOL}/snapshots/<container_name>
+	snapshotSymlink := shared.VarPath("snapshots", container.Name())
+	if shared.PathExists(snapshotSymlink) {
+		err := os.Remove(snapshotSymlink)
+		if err != nil {
+			return err
 		}
 	}
 
 	return nil
 }
 
-func (s *storageDir) ContainerCopy(
-	container container, sourceContainer container) error {
+func (s *storageDir) ContainerCopy(container container, sourceContainer container) error {
+	// COMMENT(brauner): Deal with the source container.
+	sourcePool := sourceContainer.Storage().ContainerPoolGet()
+	sourceContainerConfig := sourceContainer.Storage().GetStoragePoolWritable()
+	sourceSource := sourceContainerConfig.PoolConfig["source"]
+	if sourceSource == "" {
+		return fmt.Errorf("No \"source\" property found for the storage pool.")
+	}
 
-	oldPath := sourceContainer.Path()
-	newPath := container.Path()
+	targetIsPrivileged := container.IsPrivileged()
+	targetContainerMntPoint := getContainerMountPoint(s.pool.PoolName, container.Name())
+	targetContainerSymlink := container.Path()
+	err := createContainerMountpoint(targetContainerMntPoint, targetContainerSymlink, targetIsPrivileged)
+	if err != nil {
+		return err
+	}
 
-	/*
-	 * Copy by using rsync
-	 */
-	output, err := storageRsyncCopy(oldPath, newPath)
+	sourceContainerMntPoint := getContainerMountPoint(sourcePool, sourceContainer.Name())
+	output, err := storageRsyncCopy(sourceContainerMntPoint, targetContainerMntPoint)
 	if err != nil {
 		s.ContainerDelete(container)
 		s.log.Error("ContainerCopy: rsync failed", log.Ctx{"output": string(output)})
 		return fmt.Errorf("rsync failed: %s", string(output))
 	}
 
-	err = s.setUnprivUserAcl(sourceContainer, container.Path())
+	err = s.setUnprivUserAcl(sourceContainer, targetContainerMntPoint)
 	if err != nil {
 		return err
 	}
@@ -221,17 +324,45 @@ func (s *storageDir) ContainerUmount(name string, path string) (error, bool) {
 }
 
 func (s *storageDir) ContainerRename(container container, newName string) error {
-	oldName := container.Name()
-
-	oldPath := container.Path()
-	newPath := containerPath(newName, false)
+	source := s.pool.PoolConfig["source"]
+	if source == "" {
+		return fmt.Errorf("No \"source\" property found for the storage pool.")
+	}
 
-	if err := os.Rename(oldPath, newPath); err != nil {
+	oldContainerMntPoint := getContainerMountPoint(s.pool.PoolName, container.Name())
+	oldContainerSymlink := shared.VarPath("containers", container.Name())
+	newContainerMntPoint := getContainerMountPoint(s.pool.PoolName, newName)
+	newContainerSymlink := shared.VarPath("containers", newName)
+	err := renameContainerMountpoint(oldContainerMntPoint, oldContainerSymlink, newContainerMntPoint, newContainerSymlink)
+	if err != nil {
 		return err
 	}
 
-	if shared.PathExists(shared.VarPath(fmt.Sprintf("snapshots/%s", oldName))) {
-		err := os.Rename(shared.VarPath(fmt.Sprintf("snapshots/%s", oldName)), shared.VarPath(fmt.Sprintf("snapshots/%s", newName)))
+	// COMMENT(brauner): Rename the snapshot mountpoint for the container if
+	// existing:
+	// ${POOL}/snapshots/<old_container_name> to ${POOL}/snapshots/<new_container_name>
+	oldSnapshotsMntPoint := getSnapshotMountPoint(s.pool.PoolName, container.Name())
+	newSnapshotsMntPoint := getSnapshotMountPoint(s.pool.PoolName, newName)
+	if shared.PathExists(oldSnapshotsMntPoint) {
+		err = os.Rename(oldSnapshotsMntPoint, newSnapshotsMntPoint)
+		if err != nil {
+			return err
+		}
+	}
+
+	// COMMENT(brauner): Remove the old snapshot symlink:
+	// ${LXD_DIR}/snapshots/<old_container_name>
+	oldSnapshotSymlink := shared.VarPath("snapshots", container.Name())
+	newSnapshotSymlink := shared.VarPath("snapshots", newName)
+	if shared.PathExists(oldSnapshotSymlink) {
+		err := os.Remove(oldSnapshotSymlink)
+		if err != nil {
+			return err
+		}
+
+		// COMMENT(brauner): Create the new snapshot symlink:
+		// ${LXD_DIR}/snapshots/<new_container_name> -> ${POOL}/snapshots/<new_container_name>
+		err = os.Symlink(newSnapshotsMntPoint, newSnapshotSymlink)
 		if err != nil {
 			return err
 		}
@@ -240,17 +371,12 @@ func (s *storageDir) ContainerRename(container container, newName string) error
 	return nil
 }
 
-func (s *storageDir) ContainerRestore(
-	container container, sourceContainer container) error {
-
+func (s *storageDir) ContainerRestore(container container, sourceContainer container) error {
 	targetPath := container.Path()
 	sourcePath := sourceContainer.Path()
 
 	// Restore using rsync
-	output, err := storageRsyncCopy(
-		sourcePath,
-		targetPath)
-
+	output, err := storageRsyncCopy(sourcePath, targetPath)
 	if err != nil {
 		s.log.Error(
 			"ContainerRestore: rsync failed",
@@ -275,98 +401,151 @@ func (s *storageDir) ContainerGetUsage(container container) (int64, error) {
 	return -1, fmt.Errorf("The directory container backend doesn't support quotas.")
 }
 
-func (s *storageDir) ContainerSnapshotCreate(
-	snapshotContainer container, sourceContainer container) error {
-
-	oldPath := sourceContainer.Path()
-	newPath := snapshotContainer.Path()
+func (s *storageDir) ContainerSnapshotCreate(snapshotContainer container, sourceContainer container) error {
+	// COMMENT(brauner): Create the path for the snapshot.
+	targetContainerName := snapshotContainer.Name()
+	targetContainerMntPoint := getSnapshotMountPoint(s.pool.PoolName, targetContainerName)
+	err := os.MkdirAll(targetContainerMntPoint, 0711)
+	if err != nil {
+		return err
+	}
 
-	/*
-	 * Copy by using rsync
-	 */
 	rsync := func(snapshotContainer container, oldPath string, newPath string) error {
 		output, err := storageRsyncCopy(oldPath, newPath)
 		if err != nil {
 			s.ContainerDelete(snapshotContainer)
-			s.log.Error("ContainerSnapshotCreate: rsync failed",
-				log.Ctx{"output": string(output)})
-
+			s.log.Error("ContainerSnapshotCreate: rsync failed", log.Ctx{"output": string(output)})
 			return fmt.Errorf("rsync failed: %s", string(output))
 		}
 		return nil
 	}
 
-	if err := rsync(snapshotContainer, oldPath, newPath); err != nil {
+	sourcePool := sourceContainer.Storage().ContainerPoolGet()
+	sourceContainerName := sourceContainer.Name()
+	sourceContainerMntPoint := getContainerMountPoint(sourcePool, sourceContainerName)
+	err = rsync(snapshotContainer, sourceContainerMntPoint, targetContainerMntPoint)
+	if err != nil {
 		return err
 	}
 
 	if sourceContainer.IsRunning() {
-		/* This is done to ensure consistency when snapshotting. But we
-		 * probably shouldn't fail just because of that.
-		 */
+		// This is done to ensure consistency when snapshotting. But we
+		// probably shouldn't fail just because of that.
 		s.log.Debug("ContainerSnapshotCreate: trying to freeze and rsync again to ensure consistency.")
-		if err := sourceContainer.Freeze(); err != nil {
+
+		err := sourceContainer.Freeze()
+		if err != nil {
 			s.log.Warn("ContainerSnapshotCreate: trying to freeze and rsync again failed.")
 			return nil
 		}
 
-		if err := rsync(snapshotContainer, oldPath, newPath); err != nil {
+		err = rsync(snapshotContainer, sourceContainerMntPoint, targetContainerMntPoint)
+		if err != nil {
 			return err
 		}
 
 		defer sourceContainer.Unfreeze()
 	}
 
+	// COMMENT(brauner): Check if the symlink
+	// ${LXD_DIR}/snapshots/<source_container_name> -> ${POOL_PATH}/snapshots/<source_container_name>
+	// exists and if not create it.
+	sourceContainerSymlink := shared.VarPath("snapshots", sourceContainerName)
+	sourceContainerSymlinkTarget := getSnapshotMountPoint(sourcePool, sourceContainerName)
+	if !shared.PathExists(sourceContainerSymlink) {
+		err = os.Symlink(sourceContainerSymlinkTarget, sourceContainerSymlink)
+		if err != nil {
+			return err
+		}
+	}
+
 	return nil
 }
 
 func (s *storageDir) ContainerSnapshotCreateEmpty(snapshotContainer container) error {
-	return os.MkdirAll(snapshotContainer.Path(), 0700)
-}
-
-func (s *storageDir) ContainerSnapshotDelete(
-	snapshotContainer container) error {
-	err := s.ContainerDelete(snapshotContainer)
+	// COMMENT(brauner): Create the path for the snapshot.
+	targetContainerName := snapshotContainer.Name()
+	targetContainerMntPoint := getSnapshotMountPoint(s.pool.PoolName, targetContainerName)
+	err := os.MkdirAll(targetContainerMntPoint, 0711)
 	if err != nil {
-		return fmt.Errorf("Error deleting snapshot %s: %s", snapshotContainer.Name(), err)
+		return err
 	}
 
-	oldPathParent := filepath.Dir(snapshotContainer.Path())
-	if ok, _ := shared.PathIsEmpty(oldPathParent); ok {
-		os.Remove(oldPathParent)
+	// COMMENT(brauner): Check if the symlink
+	// ${LXD_DIR}/snapshots/<source_container_name> -> ${POOL_PATH}/snapshots/<source_container_name>
+	// exists and if not create it.
+	fields := strings.SplitN(targetContainerName, shared.SnapshotDelimiter, 2)
+	sourceContainerName := fields[0]
+	sourceContainerSymlink := shared.VarPath("snapshots", sourceContainerName)
+	sourceContainerSymlinkTarget := getSnapshotMountPoint(s.pool.PoolName, sourceContainerName)
+	if !shared.PathExists(sourceContainerSymlink) {
+		err = os.Symlink(sourceContainerSymlinkTarget, sourceContainerSymlink)
+		if err != nil {
+			return err
+		}
 	}
 
 	return nil
 }
 
-func (s *storageDir) ContainerSnapshotRename(
-	snapshotContainer container, newName string) error {
-
-	oldPath := snapshotContainer.Path()
-	newPath := containerPath(newName, true)
+func (s *storageDir) ContainerSnapshotDelete(snapshotContainer container) error {
+	source := s.pool.PoolConfig["source"]
+	if source == "" {
+		return fmt.Errorf("No \"source\" property found for the storage pool.")
+	}
 
-	// Create the new parent.
-	if strings.Contains(snapshotContainer.Name(), "/") {
-		if !shared.PathExists(filepath.Dir(newPath)) {
-			os.MkdirAll(filepath.Dir(newPath), 0700)
+	// COMMENT(brauner): Delete the snapshot on its storage pool:
+	// ${POOL}/snapshots/<snapshot_name>
+	snapshotContainerName := snapshotContainer.Name()
+	snapshotContainerMntPoint := getSnapshotMountPoint(s.pool.PoolName, snapshotContainerName)
+	if shared.PathExists(snapshotContainerMntPoint) {
+		err := os.RemoveAll(snapshotContainerMntPoint)
+		if err != nil {
+			return err
 		}
 	}
 
-	// Now rename the snapshot.
-	if err := os.Rename(oldPath, newPath); err != nil {
-		return err
-	}
+	// COMMENT(brauner): Check if we can remove the snapshot symlink:
+	// ${LXD_DIR}/snapshots/<container_name> -> ${POOL}/snapshots/<container_name>
+	// by checking if the directory is empty.
+	fields := strings.SplitN(snapshotContainerName, shared.SnapshotDelimiter, 2)
+	sourceContainerName := fields[0]
+	snapshotContainerPath := getSnapshotMountPoint(s.pool.PoolName, sourceContainerName)
+	empty, _ := shared.PathIsEmpty(snapshotContainerPath)
+	if empty {
+		// COMMENT(brauner): Remove the snapshot directory for the
+		// container:
+		// ${POOL}/snapshots/<source_container_name>
+		err := os.Remove(snapshotContainerPath)
+		if err != nil {
+			return err
+		}
 
-	// Remove the old parent (on container rename) if its empty.
-	if strings.Contains(snapshotContainer.Name(), "/") {
-		if ok, _ := shared.PathIsEmpty(filepath.Dir(oldPath)); ok {
-			os.Remove(filepath.Dir(oldPath))
+		snapshotSymlink := shared.VarPath("snapshots", sourceContainerName)
+		if shared.PathExists(snapshotSymlink) {
+			err := os.Remove(snapshotSymlink)
+			if err != nil {
+				return err
+			}
 		}
 	}
 
 	return nil
 }
 
+func (s *storageDir) ContainerSnapshotRename(snapshotContainer container, newName string) error {
+	// COMMENT(brauner): Rename the mountpoint for the snapshot:
+	// ${POOL}/snapshots/<old_snapshot_name> to ${POOL}/snapshots/<new_snapshot_name>
+	oldSnapshotMntPoint := getSnapshotMountPoint(s.pool.PoolName, snapshotContainer.Name())
+	newSnapshotMntPoint := getSnapshotMountPoint(s.pool.PoolName, newName)
+	err := os.Rename(oldSnapshotMntPoint, newSnapshotMntPoint)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
 func (s *storageDir) ContainerSnapshotStart(container container) error {
 	return nil
 }
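
The dir backend above leans entirely on the mountpoint helpers introduced
elsewhere in this series (getContainerMountPoint, getSnapshotMountPoint and
friends). Judging from the paths spelled out in the comments
(${LXD_DIR}/storage-pools/<pool_name>/{containers,snapshots,custom}/...),
they presumably reduce to simple path joins as in the following sketch; the
signatures and the /var/lib/lxd prefix standing in for ${LXD_DIR} are
assumptions, not the series' actual definitions:

package main

import (
	"fmt"
	"path/filepath"
)

// varPath mimics shared.VarPath(), which joins onto ${LXD_DIR}.
func varPath(parts ...string) string {
	return filepath.Join(append([]string{"/var/lib/lxd"}, parts...)...)
}

func getStoragePoolMountPoint(pool string) string {
	return varPath("storage-pools", pool)
}

func getContainerMountPoint(pool string, name string) string {
	return varPath("storage-pools", pool, "containers", name)
}

func getSnapshotMountPoint(pool string, name string) string {
	return varPath("storage-pools", pool, "snapshots", name)
}

func getImageMountPoint(pool string, fingerprint string) string {
	return varPath("storage-pools", pool, "images", fingerprint)
}

func getStoragePoolVolumeMountPoint(pool string, volume string) string {
	return varPath("storage-pools", pool, "custom", volume)
}

func main() {
	fmt.Println(getContainerMountPoint("default", "c1"))
	// /var/lib/lxd/storage-pools/default/containers/c1
}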

From 547159e59d9ea6850af96aaf3b15a1d9accc147b Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Wed, 25 Jan 2017 13:17:04 +0100
Subject: [PATCH 47/63] storage: reimplement zfs backend

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/storage_zfs.go | 501 +++++++++++++++++++++++++++++++----------------------
 1 file changed, 297 insertions(+), 204 deletions(-)
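
The core change below: datasets are no longer mounted at
${LXD_DIR}/containers/<name>.zfs; instead each dataset's mountpoint
property is pointed into the per-pool tree via
zfsPoolVolumeSet(fs, "mountpoint", ...). Assuming that helper shells out
to zfs(8) (a sketch under that assumption, not the patch's actual code):

package main

import (
	"fmt"
	"os/exec"
)

// zfsSet sets a property, e.g. the mountpoint, on dataset <pool>/<fs>.
func zfsSet(pool string, fs string, key string, value string) error {
	dataset := fmt.Sprintf("%s/%s", pool, fs)
	output, err := exec.Command("zfs", "set",
		fmt.Sprintf("%s=%s", key, value), dataset).CombinedOutput()
	if err != nil {
		return fmt.Errorf("zfs set failed: %s", string(output))
	}
	return nil
}

func main() {
	// Point lxd/containers/c1 at the new per-pool layout.
	err := zfsSet("lxd", "containers/c1", "mountpoint",
		"/var/lib/lxd/storage-pools/lxd/containers/c1")
	fmt.Println(err)
}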

diff --git a/lxd/storage_zfs.go b/lxd/storage_zfs.go
index 0cf5a90..bc7c988 100644
--- a/lxd/storage_zfs.go
+++ b/lxd/storage_zfs.go
@@ -99,7 +99,7 @@ func (s *storageZfs) StoragePoolCheck() error {
 	err := s.zfsPoolCheck(s.pool.PoolName)
 	if err != nil {
 		source := s.pool.PoolConfig["source"]
-		if shared.PathExists(source) {
+		if filepath.IsAbs(source) && shared.PathExists(source) {
 			_ = loadModule("zfs")
 
 			output, err := exec.Command("zpool", "import", source, s.pool.PoolName).CombinedOutput()
@@ -132,27 +132,13 @@ func (s *storageZfs) StoragePoolCreate() error {
 		return err
 	}
 
-	return nil
-}
-
-func (s *storageZfs) StoragePoolVolumeCreate() error {
-	fs := fmt.Sprintf("custom/%s", s.volume.VolumeName)
-	fsMountpoint := fmt.Sprintf("%s@%s", shared.VarPath(fs), s.pool.PoolName)
-
-	err := s.zfsPoolVolumeCreate(fs)
-	if err != nil {
-		return err
-	}
-
-	err = s.zfsPoolVolumeSet(fs, "mountpoint", fsMountpoint)
+	storagePoolMntPoint := getStoragePoolMountPoint(s.pool.PoolName)
+	err = os.MkdirAll(storagePoolMntPoint, 0755)
 	if err != nil {
+		s.log.Error(fmt.Sprintf("Failed to create mountpoint for storage pool \"%s\".", s.pool.PoolName))
 		return err
 	}
 
-	if !shared.IsMountPoint(fsMountpoint) {
-		s.zfsPoolVolumeMount(fs)
-	}
-
 	return nil
 }
 
@@ -162,6 +148,14 @@ func (s *storageZfs) StoragePoolDelete() error {
 		return err
 	}
 
+	storagePoolMntPoint := getStoragePoolMountPoint(s.pool.PoolName)
+	if shared.PathExists(storagePoolMntPoint) {
+		err := os.RemoveAll(storagePoolMntPoint)
+		if err != nil {
+			s.log.Error(fmt.Sprintf("Failed to delete mountpoint for storage pool \"%s\".", s.pool.PoolName))
+		}
+	}
+
 	return nil
 }
 
@@ -173,19 +167,40 @@ func (s *storageZfs) StoragePoolUmount() (error, bool) {
 	return nil, true
 }
 
+func (s *storageZfs) StoragePoolVolumeCreate() error {
+	fs := fmt.Sprintf("custom/%s", s.volume.VolumeName)
+	customPoolVolumeMntPoint := getStoragePoolVolumeMountPoint(s.pool.PoolName, s.volume.VolumeName)
+
+	err := s.zfsPoolVolumeCreate(fs)
+	if err != nil {
+		return err
+	}
+
+	err = s.zfsPoolVolumeSet(fs, "mountpoint", customPoolVolumeMntPoint)
+	if err != nil {
+		return err
+	}
+
+	if !shared.IsMountPoint(customPoolVolumeMntPoint) {
+		s.zfsPoolVolumeMount(fs)
+	}
+
+	return nil
+}
+
 func (s *storageZfs) StoragePoolVolumeDelete() error {
 	fs := fmt.Sprintf("custom/%s", s.volume.VolumeName)
-	fsMountpoint := fmt.Sprintf("%s@%s", shared.VarPath(fs), s.pool.PoolName)
+	customPoolVolumeMntPoint := getStoragePoolVolumeMountPoint(s.pool.PoolName, s.volume.VolumeName)
 
 	err := s.zfsPoolVolumeDestroy(fs)
 	if err != nil {
 		return err
 	}
 
-	if shared.PathExists(fsMountpoint) {
-		err := os.RemoveAll(fsMountpoint)
+	if shared.PathExists(customPoolVolumeMntPoint) {
+		err := os.RemoveAll(customPoolVolumeMntPoint)
 		if err != nil {
-			shared.LogWarnf("Failed to remove mountpoint \"%s\" for storage volume \"%s\".", fsMountpoint, s.volume.VolumeName)
+			shared.LogWarnf("Failed to remove mountpoint \"%s\" for storage volume \"%s\".", customPoolVolumeMntPoint, s.volume.VolumeName)
 		}
 	}
 
@@ -193,19 +208,10 @@ func (s *storageZfs) StoragePoolVolumeDelete() error {
 }
 
 func (s *storageZfs) StoragePoolVolumeMount() (error, bool) {
-	volApiEndpoint, err := storagePoolVolumeTypeNameToApiEndpoint(s.volume.VolumeType)
-	if err != nil {
-		return err, false
-	}
-
-	fs := fmt.Sprintf("%s/%s", volApiEndpoint, s.volume.VolumeName)
-	fsMountpoint := fmt.Sprintf("%s", shared.VarPath(fs))
-
-	if s.volume.VolumeType == storagePoolVolumeTypeNameCustom {
-		fsMountpoint = fmt.Sprintf("%s@%s", fsMountpoint, s.pool.PoolName)
-	}
+	fs := fmt.Sprintf("custom/%s", s.volume.VolumeName)
+	customPoolVolumeMntPoint := getStoragePoolVolumeMountPoint(s.pool.PoolName, s.volume.VolumeName)
 
-	if !shared.IsMountPoint(fsMountpoint) {
+	if !shared.IsMountPoint(customPoolVolumeMntPoint) {
 		err := s.zfsPoolVolumeMount(fs)
 		if err != nil {
 			return err, false
@@ -216,19 +222,10 @@ func (s *storageZfs) StoragePoolVolumeMount() (error, bool) {
 }
 
 func (s *storageZfs) StoragePoolVolumeUmount() (error, bool) {
-	volApiEndpoint, err := storagePoolVolumeTypeNameToApiEndpoint(s.volume.VolumeType)
-	if err != nil {
-		return err, false
-	}
-
-	fs := fmt.Sprintf("%s/%s", volApiEndpoint, s.volume.VolumeName)
-	fsMountpoint := fmt.Sprintf("%s", shared.VarPath(fs))
-
-	if s.volume.VolumeType == storagePoolVolumeTypeNameCustom {
-		fsMountpoint = fmt.Sprintf("%s@%s", fsMountpoint, s.pool.PoolName)
-	}
+	fs := fmt.Sprintf("custom/%s", s.volume.VolumeName)
+	customPoolVolumeMntPoint := getStoragePoolVolumeMountPoint(s.pool.PoolName, s.volume.VolumeName)
 
-	if shared.IsMountPoint(fsMountpoint) {
+	if shared.IsMountPoint(customPoolVolumeMntPoint) {
 		err := s.zfsPoolVolumeUmount(fs)
 		if err != nil {
 			return err, false
@@ -285,27 +282,44 @@ func (s *storageZfs) StoragePoolVolumeUpdate(changedConfig []string) error {
 // Things we don't need to care about
 func (s *storageZfs) ContainerMount(name string, path string) (error, bool) {
 	fs := fmt.Sprintf("containers/%s", name)
+	containerPoolVolumeMntPoint := getContainerMountPoint(s.pool.PoolName, name)
 
 	// Just in case the container filesystem got unmounted
-	if !shared.IsMountPoint(shared.VarPath(fs)) {
-		err := s.zfsPoolVolumeMount(fs)
-		if err != nil {
-			return err, false
-		}
+	if shared.IsMountPoint(containerPoolVolumeMntPoint) {
+		return nil, false
+	}
+
+	err := s.zfsPoolVolumeMount(fs)
+	if err != nil {
+		return err, false
 	}
 
 	return nil, true
 }
 
 func (s *storageZfs) ContainerUmount(name string, path string) (error, bool) {
+	fs := fmt.Sprintf("containers/%s", name)
+	containerPoolVolumeMntPoint := getContainerMountPoint(s.pool.PoolName, name)
+
+	// Just in case the container filesystem got unmounted
+	if !shared.IsMountPoint(containerPoolVolumeMntPoint) {
+		return nil, false
+	}
+
+	err := s.zfsPoolVolumeUmount(fs)
+	if err != nil {
+		return err, false
+	}
+
 	return nil, true
 }
 
 // Things we do have to care about
 func (s *storageZfs) ContainerCreate(container container) error {
-	cPath := container.Path()
-	fs := fmt.Sprintf("containers/%s", container.Name())
-	fsMountpoint := fmt.Sprintf("%s.zfs", shared.VarPath(fs))
+	containerPath := container.Path()
+	containerName := container.Name()
+	fs := fmt.Sprintf("containers/%s", containerName)
+	containerPoolVolumeMntPoint := getContainerMountPoint(s.pool.PoolName, containerName)
 
 	// Create volume.
 	err := s.zfsPoolVolumeCreate(fs)
@@ -314,29 +328,20 @@ func (s *storageZfs) ContainerCreate(container container) error {
 	}
 
 	// Set mountpoint.
-	err = s.zfsPoolVolumeSet(fs, "mountpoint", fsMountpoint)
+	err = s.zfsPoolVolumeSet(fs, "mountpoint", containerPoolVolumeMntPoint)
 	if err != nil {
 		return err
 	}
 
-	// Check if it got automatically mounted.
-	if !shared.IsMountPoint(shared.VarPath(fs)) {
-		s.zfsPoolVolumeMount(fs)
-	}
-
-	err = os.Symlink(cPath+".zfs", cPath)
+	err, ourMount := s.ContainerMount(containerName, container.Path())
 	if err != nil {
 		return err
 	}
-
-	var mode os.FileMode
-	if container.IsPrivileged() {
-		mode = 0700
-	} else {
-		mode = 0755
+	if ourMount {
+		defer s.ContainerUmount(containerName, container.Path())
 	}
 
-	err = os.Chmod(cPath, mode)
+	err = createContainerMountpoint(containerPoolVolumeMntPoint, containerPath, container.IsPrivileged())
 	if err != nil {
 		return err
 	}
@@ -350,38 +355,45 @@ func (s *storageZfs) ContainerCreate(container container) error {
 }
 
 func (s *storageZfs) ContainerCreateFromImage(container container, fingerprint string) error {
-	cPath := container.Path()
-	fs := fmt.Sprintf("containers/%s", container.Name())
+	containerPath := container.Path()
+	containerName := container.Name()
+	fs := fmt.Sprintf("containers/%s", containerName)
+	containerPoolVolumeMntPoint := getContainerMountPoint(s.pool.PoolName, containerName)
+
 	fsImage := fmt.Sprintf("images/%s", fingerprint)
 
-	err := s.zfsPoolVolumeClone(fsImage, "readonly", fs, true)
+	err := s.zfsPoolVolumeClone(fsImage, "readonly", fs, containerPoolVolumeMntPoint)
 	if err != nil {
 		return err
 	}
 
-	err = os.Symlink(cPath+".zfs", cPath)
+	// Set mountpoint.
+	err = s.zfsPoolVolumeSet(fs, "mountpoint", containerPoolVolumeMntPoint)
 	if err != nil {
 		return err
 	}
 
-	var mode os.FileMode
-	if container.IsPrivileged() {
-		mode = 0700
-	} else {
-		mode = 0755
+	err, ourMount := s.ContainerMount(containerName, containerPath)
+	if err != nil {
+		return err
+	}
+	if ourMount {
+		defer s.ContainerUmount(containerName, container.Path())
 	}
 
-	err = os.Chmod(cPath, mode)
+	privileged := container.IsPrivileged()
+	err = createContainerMountpoint(containerPoolVolumeMntPoint, containerPath, privileged)
 	if err != nil {
 		return err
 	}
 
-	if !container.IsPrivileged() {
+	if !privileged {
 		err = s.shiftRootfs(container)
 		if err != nil {
 			return err
 		}
 	}
+
 	err = container.TemplateApply("create")
 	if err != nil {
 		return err
@@ -408,7 +420,9 @@ func (s *storageZfs) ContainerCanRestore(container container, sourceContainer co
 }
 
 func (s *storageZfs) ContainerDelete(container container) error {
-	fs := fmt.Sprintf("containers/%s", container.Name())
+	containerName := container.Name()
+	fs := fmt.Sprintf("containers/%s", containerName)
+	containerPoolVolumeMntPoint := getContainerMountPoint(s.pool.PoolName, containerName)
 
 	if s.zfsPoolVolumeExists(fs) {
 		removable := true
@@ -458,75 +472,86 @@ func (s *storageZfs) ContainerDelete(container container) error {
 		}
 	}
 
-	if shared.PathExists(shared.VarPath(fs)) {
-		err := os.Remove(shared.VarPath(fs))
+	err := deleteContainerMountpoint(containerPoolVolumeMntPoint, container.Path(), s.GetStorageTypeName())
+	if err != nil {
+		return err
+	}
+
+	snapshotZfsDataset := fmt.Sprintf("snapshots/%s", containerName)
+	s.zfsPoolVolumeDestroy(snapshotZfsDataset)
+
+	// COMMENT(brauner): Delete potential leftover snapshot mountpoints.
+	snapshotMntPoint := getSnapshotMountPoint(s.pool.PoolName, containerName)
+	if shared.PathExists(snapshotMntPoint) {
+		err := os.RemoveAll(snapshotMntPoint)
 		if err != nil {
 			return err
 		}
 	}
 
-	if shared.PathExists(shared.VarPath(fs) + ".zfs") {
-		err := os.Remove(shared.VarPath(fs) + ".zfs")
+	// COMMENT(brauner): Delete potential leftover snapshot symlinks:
+	// ${LXD_DIR}/snapshots/<container_name> -> ${POOL}/snapshots/<container_name>
+	snapshotSymlink := shared.VarPath("snapshots", containerName)
+	if shared.PathExists(snapshotSymlink) {
+		err := os.Remove(snapshotSymlink)
 		if err != nil {
 			return err
 		}
 	}
 
-	s.zfsPoolVolumeDestroy(fmt.Sprintf("snapshots/%s", container.Name()))
-
 	return nil
 }
 
 func (s *storageZfs) ContainerCopy(container container, sourceContainer container) error {
-	var sourceFs string
-	var sourceSnap string
+	sourceContainerName := sourceContainer.Name()
+	sourceContainerPath := sourceContainer.Path()
+
+	targetContainerName := container.Name()
+	targetContainerPath := container.Path()
+	targetContainerMountPoint := getContainerMountPoint(s.pool.PoolName, targetContainerName)
 
-	sourceFields := strings.SplitN(sourceContainer.Name(), shared.SnapshotDelimiter, 2)
+	sourceZfsDataset := ""
+	sourceZfsDatasetSnapshot := ""
+	sourceFields := strings.SplitN(sourceContainerName, shared.SnapshotDelimiter, 2)
 	sourceName := sourceFields[0]
 
-	destName := container.Name()
-	destFs := fmt.Sprintf("containers/%s", destName)
+	targetZfsDataset := fmt.Sprintf("containers/%s", targetContainerName)
 
 	if len(sourceFields) == 2 {
-		sourceSnap = sourceFields[1]
+		sourceZfsDatasetSnapshot = sourceFields[1]
 	}
 
-	if sourceSnap == "" {
+	if sourceZfsDatasetSnapshot == "" {
 		if s.zfsPoolVolumeExists(fmt.Sprintf("containers/%s", sourceName)) {
-			sourceSnap = fmt.Sprintf("copy-%s", uuid.NewRandom().String())
-			sourceFs = fmt.Sprintf("containers/%s", sourceName)
-			err := s.zfsPoolVolumeSnapshotCreate(fmt.Sprintf("containers/%s", sourceName), sourceSnap)
+			sourceZfsDatasetSnapshot = fmt.Sprintf("copy-%s", uuid.NewRandom().String())
+			sourceZfsDataset = fmt.Sprintf("containers/%s", sourceName)
+			err := s.zfsPoolVolumeSnapshotCreate(fmt.Sprintf("containers/%s", sourceName), sourceZfsDatasetSnapshot)
 			if err != nil {
 				return err
 			}
 		}
 	} else {
-		if s.zfsPoolVolumeExists(fmt.Sprintf("containers/%s@snapshot-%s", sourceName, sourceSnap)) {
-			sourceFs = fmt.Sprintf("containers/%s", sourceName)
-			sourceSnap = fmt.Sprintf("snapshot-%s", sourceSnap)
+		if s.zfsPoolVolumeExists(fmt.Sprintf("containers/%s@snapshot-%s", sourceName, sourceZfsDatasetSnapshot)) {
+			sourceZfsDataset = fmt.Sprintf("containers/%s", sourceName)
+			sourceZfsDatasetSnapshot = fmt.Sprintf("snapshot-%s", sourceZfsDatasetSnapshot)
 		}
 	}
 
-	if sourceFs != "" {
-		err := s.zfsPoolVolumeClone(sourceFs, sourceSnap, destFs, true)
+	if sourceZfsDataset != "" {
+		err := s.zfsPoolVolumeClone(sourceZfsDataset, sourceZfsDatasetSnapshot, targetZfsDataset, targetContainerMountPoint)
 		if err != nil {
 			return err
 		}
 
-		cPath := container.Path()
-		err = os.Symlink(cPath+".zfs", cPath)
+		err, ourMount := s.ContainerMount(targetContainerName, targetContainerPath)
 		if err != nil {
 			return err
 		}
-
-		var mode os.FileMode
-		if container.IsPrivileged() {
-			mode = 0700
-		} else {
-			mode = 0755
+		if ourMount {
+			defer s.ContainerUmount(targetContainerName, targetContainerPath)
 		}
 
-		err = os.Chmod(cPath, mode)
+		err = createContainerMountpoint(targetContainerMountPoint, targetContainerPath, container.IsPrivileged())
 		if err != nil {
 			return err
 		}
@@ -536,7 +561,7 @@ func (s *storageZfs) ContainerCopy(container container, sourceContainer containe
 			return err
 		}
 
-		output, err := storageRsyncCopy(sourceContainer.Path(), container.Path())
+		output, err := storageRsyncCopy(sourceContainerPath, targetContainerPath)
 		if err != nil {
 			return fmt.Errorf("rsync failed: %s", string(output))
 		}
@@ -548,53 +573,65 @@ func (s *storageZfs) ContainerCopy(container container, sourceContainer containe
 func (s *storageZfs) ContainerRename(container container, newName string) error {
 	oldName := container.Name()
 
-	// Unmount the filesystem
-	err := s.zfsPoolVolumeUmount(fmt.Sprintf("containers/%s", oldName))
+	// COMMENT(brauner): Unmount the dataset.
+	err, _ := s.ContainerUmount(oldName, "")
 	if err != nil {
 		return err
 	}
 
-	// Rename the filesystem
-	err = s.zfsPoolVolumeRename(fmt.Sprintf("containers/%s", oldName), fmt.Sprintf("containers/%s", newName))
+	// COMMENT(brauner): Rename the dataset.
+	oldZfsDataset := fmt.Sprintf("containers/%s", oldName)
+	newZfsDataset := fmt.Sprintf("containers/%s", newName)
+	err = s.zfsPoolVolumeRename(oldZfsDataset, newZfsDataset)
 	if err != nil {
 		return err
 	}
 
-	// Update to the new mountpoint
-	err = s.zfsPoolVolumeSet(fmt.Sprintf("containers/%s", newName), "mountpoint", shared.VarPath(fmt.Sprintf("containers/%s.zfs", newName)))
+	// COMMENT(brauner): Set the new mountpoint for the dataset.
+	newContainerMntPoint := getContainerMountPoint(s.pool.PoolName, newName)
+	err = s.zfsPoolVolumeSet(newZfsDataset, "mountpoint", newContainerMntPoint)
 	if err != nil {
 		return err
 	}
 
-	// In case ZFS didn't mount the filesystem, do it ourselves
-	err = s.zfsPoolVolumeMount(fmt.Sprintf("containers/%s", newName))
+	// COMMENT(brauner): Unmount the dataset again; setting the mountpoint
+	// property remounts it.
+	err, _ = s.ContainerUmount(newName, "")
 	if err != nil {
 		return err
 	}
 
-	// In case the change of mountpoint didn't remove the old path, do it ourselves
-	if shared.PathExists(shared.VarPath(fmt.Sprintf("containers/%s.zfs", oldName))) {
-		err = os.Remove(shared.VarPath(fmt.Sprintf("containers/%s.zfs", oldName)))
+	// COMMENT(brauner): Rename the mountpoint on the storage pool.
+	oldContainerMntPoint := getContainerMountPoint(s.pool.PoolName, oldName)
+	oldContainerMntPointSymlink := container.Path()
+	newContainerMntPointSymlink := shared.VarPath("containers", newName)
+	err = renameContainerMountpoint(oldContainerMntPoint, oldContainerMntPointSymlink, newContainerMntPoint, newContainerMntPointSymlink)
+	if err != nil {
+		return err
+	}
+
+	// COMMENT(brauner): Rename the snapshot mountpoint on the storage pool.
+	oldSnapshotMntPoint := getSnapshotMountPoint(s.pool.PoolName, oldName)
+	newSnapshotMntPoint := getSnapshotMountPoint(s.pool.PoolName, newName)
+	if shared.PathExists(oldSnapshotMntPoint) {
+		err := os.Rename(oldSnapshotMntPoint, newSnapshotMntPoint)
 		if err != nil {
 			return err
 		}
 	}
 
-	// Remove the old symlink
-	err = os.Remove(shared.VarPath(fmt.Sprintf("containers/%s", oldName)))
-	if err != nil {
-		return err
-	}
-
-	// Create a new symlink
-	err = os.Symlink(shared.VarPath(fmt.Sprintf("containers/%s.zfs", newName)), shared.VarPath(fmt.Sprintf("containers/%s", newName)))
-	if err != nil {
-		return err
+	// COMMENT(brauner): Remove old symlink.
+	oldSnapshotPath := shared.VarPath("snapshots", oldName)
+	if shared.PathExists(oldSnapshotPath) {
+		err := os.Remove(oldSnapshotPath)
+		if err != nil {
+			return err
+		}
 	}
 
-	// Rename the snapshot path
-	if shared.PathExists(shared.VarPath(fmt.Sprintf("snapshots/%s", oldName))) {
-		err = os.Rename(shared.VarPath(fmt.Sprintf("snapshots/%s", oldName)), shared.VarPath(fmt.Sprintf("snapshots/%s", newName)))
+	// COMMENT(brauner): Create new symlink.
+	newSnapshotPath := shared.VarPath("snapshots", newName)
+	if !shared.PathExists(newSnapshotPath) {
+		err := os.Symlink(newSnapshotMntPoint, newSnapshotPath)
 		if err != nil {
 			return err
 		}
@@ -681,25 +718,34 @@ func (s *storageZfs) ContainerGetUsage(container container) (int64, error) {
 }
 
 func (s *storageZfs) ContainerSnapshotCreate(snapshotContainer container, sourceContainer container) error {
-	fields := strings.SplitN(snapshotContainer.Name(), shared.SnapshotDelimiter, 2)
+	snapshotContainerName := snapshotContainer.Name()
+	sourceContainerName := sourceContainer.Name()
+
+	fields := strings.SplitN(snapshotContainerName, shared.SnapshotDelimiter, 2)
 	cName := fields[0]
 	snapName := fmt.Sprintf("snapshot-%s", fields[1])
 
-	err := s.zfsPoolVolumeSnapshotCreate(fmt.Sprintf("containers/%s", cName), snapName)
+	sourceZfsDataset := fmt.Sprintf("containers/%s", cName)
+	err := s.zfsPoolVolumeSnapshotCreate(sourceZfsDataset, snapName)
 	if err != nil {
 		return err
 	}
 
-	if !shared.PathExists(shared.VarPath(fmt.Sprintf("snapshots/%s", cName))) {
-		err = os.MkdirAll(shared.VarPath(fmt.Sprintf("snapshots/%s", cName)), 0700)
+	snapshotMntPoint := getSnapshotMountPoint(s.pool.PoolName, snapshotContainerName)
+	if !shared.PathExists(snapshotMntPoint) {
+		err := os.MkdirAll(snapshotMntPoint, 0700)
 		if err != nil {
 			return err
 		}
 	}
 
-	err = os.Symlink("on-zfs", shared.VarPath(fmt.Sprintf("snapshots/%s/%s.zfs", cName, fields[1])))
-	if err != nil {
-		return err
+	snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools", s.pool.PoolName, "snapshots", sourceContainerName)
+	snapshotMntPointSymlink := shared.VarPath("snapshots", sourceContainerName)
+	if !shared.PathExists(snapshotMntPointSymlink) {
+		err := os.Symlink(snapshotMntPointSymlinkTarget, snapshotMntPointSymlink)
+		if err != nil {
+			return err
+		}
 	}
 
 	return nil
@@ -707,25 +753,60 @@ func (s *storageZfs) ContainerSnapshotCreate(snapshotContainer container, source
 
 func (s *storageZfs) ContainerSnapshotDelete(snapshotContainer container) error {
 	fields := strings.SplitN(snapshotContainer.Name(), shared.SnapshotDelimiter, 2)
-	cName := fields[0]
+	sourceContainerName := fields[0]
 	snapName := fmt.Sprintf("snapshot-%s", fields[1])
 
-	if s.zfsPoolVolumeExists(fmt.Sprintf("containers/%s@%s", cName, snapName)) {
-		removable, err := s.zfsPoolVolumeSnapshotRemovable(fmt.Sprintf("containers/%s", cName), snapName)
+	if s.zfsPoolVolumeExists(fmt.Sprintf("containers/%s@%s", sourceContainerName, snapName)) {
+		removable, err := s.zfsPoolVolumeSnapshotRemovable(fmt.Sprintf("containers/%s", sourceContainerName), snapName)
 		if removable {
-			err = s.zfsPoolVolumeSnapshotDestroy(fmt.Sprintf("containers/%s", cName), snapName)
+			err = s.zfsPoolVolumeSnapshotDestroy(fmt.Sprintf("containers/%s", sourceContainerName), snapName)
 			if err != nil {
 				return err
 			}
 		} else {
-			err = s.zfsPoolVolumeSnapshotRename(fmt.Sprintf("containers/%s", cName), snapName, fmt.Sprintf("copy-%s", uuid.NewRandom().String()))
+			err = s.zfsPoolVolumeSnapshotRename(fmt.Sprintf("containers/%s", sourceContainerName), snapName, fmt.Sprintf("copy-%s", uuid.NewRandom().String()))
 			if err != nil {
 				return err
 			}
 		}
 	}
 
-	snapPath := shared.VarPath(fmt.Sprintf("snapshots/%s/%s.zfs", cName, fields[1]))
+	// COMMENT(brauner): Delete the snapshot on its storage pool:
+	// ${POOL}/snapshots/<snapshot_name>
+	snapshotContainerName := snapshotContainer.Name()
+	snapshotContainerMntPoint := getSnapshotMountPoint(s.pool.PoolName, snapshotContainerName)
+	if shared.PathExists(snapshotContainerMntPoint) {
+		err := os.RemoveAll(snapshotContainerMntPoint)
+		if err != nil {
+			return err
+		}
+	}
+
+	// COMMENT(brauner): Check if we can remove the snapshot symlink:
+	// ${LXD_DIR}/snapshots/<container_name> -> ${POOL}/snapshots/<container_name>
+	// by checking if the directory is empty.
+	snapshotContainerPath := getSnapshotMountPoint(s.pool.PoolName, sourceContainerName)
+	empty, _ := shared.PathIsEmpty(snapshotContainerPath)
+	if empty {
+		// COMMENT(brauner): Remove the snapshot directory for the
+		// container:
+		// ${POOL}/snapshots/<source_container_name>
+		err := os.Remove(snapshotContainerPath)
+		if err != nil {
+			return err
+		}
+
+		snapshotSymlink := shared.VarPath("snapshots", sourceContainerName)
+		if shared.PathExists(snapshotSymlink) {
+			err := os.Remove(snapshotSymlink)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	// COMMENT(brauner): Legacy
+	snapPath := shared.VarPath(fmt.Sprintf("snapshots/%s/%s.zfs", sourceContainerName, fields[1]))
 	if shared.PathExists(snapPath) {
 		err := os.Remove(snapPath)
 		if err != nil {
@@ -733,7 +814,8 @@ func (s *storageZfs) ContainerSnapshotDelete(snapshotContainer container) error
 		}
 	}
 
-	parent := shared.VarPath(fmt.Sprintf("snapshots/%s", cName))
+	// COMMENT(brauner): Legacy
+	parent := shared.VarPath(fmt.Sprintf("snapshots/%s", sourceContainerName))
 	if ok, _ := shared.PathIsEmpty(parent); ok {
 		err := os.Remove(parent)
 		if err != nil {
@@ -745,41 +827,50 @@ func (s *storageZfs) ContainerSnapshotDelete(snapshotContainer container) error
 }
 
 func (s *storageZfs) ContainerSnapshotRename(snapshotContainer container, newName string) error {
+	oldName := snapshotContainer.Name()
+
 	oldFields := strings.SplitN(snapshotContainer.Name(), shared.SnapshotDelimiter, 2)
 	oldcName := oldFields[0]
-	oldName := fmt.Sprintf("snapshot-%s", oldFields[1])
+	oldZfsDatasetName := fmt.Sprintf("snapshot-%s", oldFields[1])
 
 	newFields := strings.SplitN(newName, shared.SnapshotDelimiter, 2)
-	newcName := newFields[0]
-	newName = fmt.Sprintf("snapshot-%s", newFields[1])
+	newZfsDatasetName := fmt.Sprintf("snapshot-%s", newFields[1])
 
-	if oldName != newName {
-		err := s.zfsPoolVolumeSnapshotRename(fmt.Sprintf("containers/%s", oldcName), oldName, newName)
+	if oldZfsDatasetName != newZfsDatasetName {
+		err := s.zfsPoolVolumeSnapshotRename(fmt.Sprintf("containers/%s", oldcName), oldZfsDatasetName, newZfsDatasetName)
 		if err != nil {
 			return err
 		}
 	}
 
-	err := os.Remove(shared.VarPath(fmt.Sprintf("snapshots/%s/%s.zfs", oldcName, oldFields[1])))
-	if err != nil {
-		return err
+	oldStyleSnapshotMntPoint := shared.VarPath(fmt.Sprintf("snapshots/%s/%s.zfs", oldcName, oldFields[1]))
+	if shared.PathExists(oldStyleSnapshotMntPoint) {
+		err := os.Remove(oldStyleSnapshotMntPoint)
+		if err != nil {
+			return err
+		}
 	}
 
-	if !shared.PathExists(shared.VarPath(fmt.Sprintf("snapshots/%s", newcName))) {
-		err = os.MkdirAll(shared.VarPath(fmt.Sprintf("snapshots/%s", newcName)), 0700)
+	oldSnapshotMntPoint := getSnapshotMountPoint(s.pool.PoolName, oldName)
+	if shared.PathExists(oldSnapshotMntPoint) {
+		err := os.Remove(oldSnapshotMntPoint)
 		if err != nil {
 			return err
 		}
 	}
 
-	err = os.Symlink("on-zfs", shared.VarPath(fmt.Sprintf("snapshots/%s/%s.zfs", newcName, newFields[1])))
-	if err != nil {
-		return err
+	newSnapshotMntPoint := getSnapshotMountPoint(s.pool.PoolName, newName)
+	if !shared.PathExists(newSnapshotMntPoint) {
+		err := os.MkdirAll(newSnapshotMntPoint, 0700)
+		if err != nil {
+			return err
+		}
 	}
 
-	parent := shared.VarPath(fmt.Sprintf("snapshots/%s", oldcName))
-	if ok, _ := shared.PathIsEmpty(parent); ok {
-		err = os.Remove(parent)
+	snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools", s.pool.PoolName, "snapshots", oldcName)
+	snapshotMntPointSymlink := shared.VarPath("snapshots", oldcName)
+	if !shared.PathExists(snapshotMntPointSymlink) {
+		err := os.Symlink(snapshotMntPointSymlinkTarget, snapshotMntPointSymlink)
 		if err != nil {
 			return err
 		}
@@ -800,7 +891,8 @@ func (s *storageZfs) ContainerSnapshotStart(container container) error {
 	sourceSnap := fmt.Sprintf("snapshot-%s", sName)
 	destFs := fmt.Sprintf("snapshots/%s/%s", cName, sName)
 
-	err := s.zfsPoolVolumeClone(sourceFs, sourceSnap, destFs, false)
+	snapshotMntPoint := getSnapshotMountPoint(s.pool.PoolName, container.Name())
+	err := s.zfsPoolVolumeClone(sourceFs, sourceSnap, destFs, snapshotMntPoint)
 	if err != nil {
 		return err
 	}
@@ -841,23 +933,15 @@ func (s *storageZfs) ContainerSnapshotCreateEmpty(snapshotContainer container) e
 // - remove mountpoint property from zfs volume images/<fingerprint>
 // - create read-write snapshot from zfs volume images/<fingerprint>
 func (s *storageZfs) ImageCreate(fingerprint string) error {
-	// Create temporary mountpoint directory.
-	tmpImageDir, err := ioutil.TempDir(shared.VarPath("images"), "lxd_images_")
-	if err != nil {
-		return err
-	}
-	defer os.RemoveAll(tmpImageDir)
-
-	imagePath := shared.VarPath("images", fingerprint)
+	imageMntPoint := getImageMountPoint(s.pool.PoolName, fingerprint)
 	fs := fmt.Sprintf("images/%s", fingerprint)
-
 	if s.zfsPoolVolumeExists(fmt.Sprintf("deleted/%s", fs)) {
 		err := s.zfsPoolVolumeRename(fmt.Sprintf("deleted/%s", fs), fs)
 		if err != nil {
 			return err
 		}
 
-		err = s.zfsPoolVolumeSet(fs, "mountpoint", "none")
+		err = s.zfsPoolVolumeSet(fs, "mountpoint", imageMntPoint)
 		if err != nil {
 			return err
 		}
@@ -865,6 +949,23 @@ func (s *storageZfs) ImageCreate(fingerprint string) error {
 		return nil
 	}
 
+	if !shared.PathExists(imageMntPoint) {
+		err := os.MkdirAll(imageMntPoint, 0700)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Create temporary mountpoint directory.
+	tmp := getImageMountPoint(s.pool.PoolName, "")
+	tmpImageDir, err := ioutil.TempDir(tmp, "")
+	if err != nil {
+		return err
+	}
+	defer os.RemoveAll(tmpImageDir)
+
+	imagePath := shared.VarPath("images", fingerprint)
+
 	// Create a new storage volume on the storage pool for the image.
 	err = s.zfsPoolVolumeCreate(fs)
 	if err != nil {
@@ -950,6 +1051,14 @@ func (s *storageZfs) ImageDelete(fingerprint string) error {
 		}
 	}
 
+	imageMntPoint := getImageMountPoint(s.pool.PoolName, fingerprint)
+	if shared.PathExists(imageMntPoint) {
+		err := os.RemoveAll(imageMntPoint)
+		if err != nil {
+			return err
+		}
+	}
+
 	if shared.PathExists(shared.VarPath(fs + ".zfs")) {
 		err := os.RemoveAll(shared.VarPath(fs + ".zfs"))
 		if err != nil {
@@ -1035,6 +1144,15 @@ func (s *storageZfs) zfsPoolCreate() error {
 			if err != nil {
 				return fmt.Errorf("Failed to close %s: %s", vdev, err)
 			}
+		} else {
+			// COMMENT(brauner): This is a block device. Note that
+			// we do not store the block device path, UUID,
+			// PARTUUID, or similar in the database. All of these
+			// might change or might be used in a special way (for
+			// example, zfs uses a single UUID for all devices in a
+			// multi-device pool). The safest approach is to just
+			// store the name of the zfs pool we create.
+			s.pool.PoolConfig["source"] = s.pool.PoolName
 		}
 	}
 
@@ -1054,14 +1172,7 @@ func (s *storageZfs) zfsPoolCreate() error {
 	return nil
 }
 
-func (s *storageZfs) zfsPoolVolumeClone(source string, name string, dest string, dotZfs bool) error {
-	var mountpoint string
-
-	mountpoint = shared.VarPath(dest)
-	if dotZfs {
-		mountpoint += ".zfs"
-	}
-
+func (s *storageZfs) zfsPoolVolumeClone(source string, name string, dest string, mountpoint string) error {
 	output, err := exec.Command(
 		"zfs",
 		"clone",
@@ -1090,16 +1201,13 @@ func (s *storageZfs) zfsPoolVolumeClone(source string, name string, dest string,
 		}
 
 		destSubvol := dest + strings.TrimPrefix(sub, source)
-		mountpoint = shared.VarPath(destSubvol)
-		if dotZfs {
-			mountpoint += ".zfs"
-		}
+		snapshotMntPoint := getSnapshotMountPoint(s.pool.PoolName, destSubvol)
 
 		output, err := exec.Command(
 			"zfs",
 			"clone",
 			"-p",
-			"-o", fmt.Sprintf("mountpoint=%s", mountpoint),
+			"-o", fmt.Sprintf("mountpoint=%s", snapshotMntPoint),
 			fmt.Sprintf("%s/%s@%s", s.pool.PoolName, sub, name),
 			fmt.Sprintf("%s/%s", s.pool.PoolName, destSubvol)).CombinedOutput()
 		if err != nil {
@@ -1125,21 +1233,6 @@ func (s *storageZfs) zfsPoolVolumeCreate(path string) error {
 	return nil
 }
 
-// func (s *storageZfs) zfsPoolVolumeCreate(path string) error {
-// 	output, err := exec.Command(
-// 		"zfs",
-// 		"create",
-// 		"-p",
-// 		"-o", fmt.Sprintf("mountpoint=%s.zfs", shared.VarPath(path)),
-// 		fmt.Sprintf("%s/%s", s.pool.PoolName, path)).CombinedOutput()
-// 	if err != nil {
-// 		s.log.Error("zfs create failed", log.Ctx{"output": string(output)})
-// 		return fmt.Errorf("Failed to create ZFS filesystem: %s", output)
-// 	}
-//
-// 	return nil
-// }
-
 func (s *storageZfs) zfsPoolDelete() error {
 	zpoolName := s.pool.PoolConfig["zfs.pool_name"]
 	if zpoolName == "" {

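A note on the new layout: the ZFS hunks above lean on mountpoint helpers
(getContainerMountPoint, getSnapshotMountPoint, getImageMountPoint) that are
introduced elsewhere in the series. Below is a minimal sketch of the layout
they appear to encode, assuming ${LXD_DIR} = /var/lib/lxd; the helper names
are taken from the call sites, but the bodies are inferred, not copied from
the patches.

package main

import (
	"fmt"
	"path/filepath"
)

// varPath stands in for shared.VarPath, rooted at ${LXD_DIR}.
func varPath(parts ...string) string {
	return filepath.Join(append([]string{"/var/lib/lxd"}, parts...)...)
}

// Containers mount under the pool:
// ${LXD_DIR}/storage-pools/<pool>/containers/<name>, with a symlink kept at
// the old location ${LXD_DIR}/containers/<name>.
func getContainerMountPoint(pool string, name string) string {
	return varPath("storage-pools", pool, "containers", name)
}

// Snapshots mount under ${LXD_DIR}/storage-pools/<pool>/snapshots/<name>,
// with a per-container symlink at ${LXD_DIR}/snapshots/<container>.
func getSnapshotMountPoint(pool string, name string) string {
	return varPath("storage-pools", pool, "snapshots", name)
}

// Images mount under ${LXD_DIR}/storage-pools/<pool>/images/<fingerprint>.
func getImageMountPoint(pool string, fingerprint string) string {
	return varPath("storage-pools", pool, "images", fingerprint)
}

func main() {
	fmt.Println(getContainerMountPoint("default", "c1"))
	fmt.Println(getSnapshotMountPoint("default", "c1/snap0"))
	fmt.Println(getImageMountPoint("default", "abcdef"))
}
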
From 84a7c5a72bbd42965941fee47aad75c19ec5e912 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Wed, 25 Jan 2017 13:17:54 +0100
Subject: [PATCH 48/63] storage: reimplement lvm backend

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/storage_lvm.go | 1047 ++++++++++++++++++++++++++++++++--------------------
 1 file changed, 641 insertions(+), 406 deletions(-)
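
The rewritten backend threads a revert-on-failure pattern (marked "CLEANER"
in the hunks below) through every create/copy/rename path: set a tryUndo
flag, register deferred undo steps as each resource comes into existence, and
disarm them all once the whole operation has succeeded. A minimal
self-contained sketch of the pattern; createResource/deleteResource are
placeholders for illustration, not LXD functions:

package main

import (
	"errors"
	"fmt"
)

func createResource(name string, fail bool) error {
	if fail {
		return errors.New("create " + name + " failed")
	}
	fmt.Println("created", name)
	return nil
}

func deleteResource(name string) {
	fmt.Println("deleted", name)
}

func createWithCleanup(failSecond bool) error {
	tryUndo := true // CLEANER

	if err := createResource("logical volume", false); err != nil {
		return err
	}
	defer func() {
		// Runs on every exit path; only acts while tryUndo is set.
		if tryUndo {
			deleteResource("logical volume")
		}
	}()

	if err := createResource("mountpoint", failSecond); err != nil {
		return err // the deferred undo removes the logical volume
	}

	tryUndo = false // CLEANER: success, deregister cleanup
	return nil
}

func main() {
	fmt.Println(createWithCleanup(true))
}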

diff --git a/lxd/storage_lvm.go b/lxd/storage_lvm.go
index 59e3b58..09bcbb7 100644
--- a/lxd/storage_lvm.go
+++ b/lxd/storage_lvm.go
@@ -2,7 +2,6 @@ package main
 
 import (
 	"fmt"
-	"io/ioutil"
 	"os"
 	"os/exec"
 	"path/filepath"
@@ -89,7 +88,7 @@ func storageLVMGetThinPoolUsers(d *Daemon) ([]string, error) {
 	return results, nil
 }
 
-func storageLVMValidateThinPoolName(d *Daemon, key string, value string) error {
+func storageLVMValidateThinPoolName(d *Daemon, vgName string, value string) error {
 	users, err := storageLVMGetThinPoolUsers(d)
 	if err != nil {
 		return fmt.Errorf("Error checking if a pool is already in use: %v", err)
@@ -99,19 +98,18 @@ func storageLVMValidateThinPoolName(d *Daemon, key string, value string) error {
 		return fmt.Errorf("Can not change LVM config. Images or containers are still using LVs: %v", users)
 	}
 
-	vgname := daemonConfig["storage.lvm_vg_name"].Get()
 	if value != "" {
-		if vgname == "" {
+		if vgName == "" {
 			return fmt.Errorf("Can not set lvm_thinpool_name without lvm_vg_name set.")
 		}
 
-		poolExists, err := storageLVMThinpoolExists(vgname, value)
+		poolExists, err := storageLVMThinpoolExists(vgName, value)
 		if err != nil {
-			return fmt.Errorf("Error checking for thin pool '%s' in '%s': %v", value, vgname, err)
+			return fmt.Errorf("Error checking for thin pool '%s' in '%s': %v", value, vgName, err)
 		}
 
 		if !poolExists {
-			return fmt.Errorf("Pool '%s' does not exist in Volume Group '%s'", value, vgname)
+			return fmt.Errorf("Pool '%s' does not exist in Volume Group '%s'", value, vgName)
 		}
 	}
 
@@ -159,6 +157,14 @@ type storageLvm struct {
 	storageShared
 }
 
+func (s *storageLvm) getLvmDevPath(lvmPool string, volumeType string, lvmVolume string) string {
+	return fmt.Sprintf("/dev/%s/%s_%s", lvmPool, volumeType, lvmVolume)
+}
+
+func (s *storageLvm) getPrefixedLvName(volumeType string, lvmVolume string) string {
+	return fmt.Sprintf("%s_%s", volumeType, lvmVolume)
+}
+
 // Only initialize the minimal information we need about a given storage type.
 func (s *storageLvm) StorageCoreInit() (*storageCore, error) {
 	sCore := storageCore{}
@@ -253,18 +259,73 @@ func (s *storageLvm) lvmVersionIsAtLeast(versionString string) (bool, error) {
 }
 
 func (s *storageLvm) StoragePoolCreate() error {
-	return nil
-}
+	tryUndo := true // CLEANER
+
+	source := s.pool.PoolConfig["source"]
+	if source == "" {
+		return fmt.Errorf("No \"source\" property found for the storage pool.")
+	}
+
+	// COMMENT(brauner): Create the mountpoint for the storage pool.
+	poolMntPoint := getStoragePoolMountPoint(s.pool.PoolName)
+	err := os.MkdirAll(poolMntPoint, 0711)
+	if err != nil {
+		return err
+	}
+	defer func( /* CLEANER */ ) {
+		if tryUndo {
+			os.Remove(poolMntPoint)
+		}
+	}()
+
+	if !shared.IsBlockdevPath(source) {
+		return fmt.Errorf("Loop backed lvm storage pools are currently not supported.")
+	}
+
+	// COMMENT(brauner): Create a lvm physical volume.
+	output, err := exec.Command("pvcreate", source).CombinedOutput()
+	if err != nil {
+		return fmt.Errorf("Failed to create the physical volume for the lvm storage pool: %s.", output)
+	}
+	defer func( /* CLEANER */ ) {
+		if tryUndo {
+			exec.Command("pvremove", source).Run()
+		}
+	}()
+
+	// COMMENT(brauner): Create a volume group on the physical volume.
+	output, err = exec.Command("vgcreate", s.pool.PoolName, source).CombinedOutput()
+	if err != nil {
+		return fmt.Errorf("Failed to create the volume group for the lvm storage pool: %s.", output)
+	}
+
+	s.pool.PoolConfig["source"] = s.pool.PoolName
+
+	// COMMENT(brauner): Deregister cleanup.
+	tryUndo = false
 
-func (s *storageLvm) StoragePoolVolumeCreate() error {
 	return nil
 }
 
 func (s *storageLvm) StoragePoolDelete() error {
-	return nil
-}
+	source := s.pool.PoolConfig["source"]
+	if source == "" {
+		return fmt.Errorf("No \"source\" property found for the storage pool.")
+	}
+
+	// COMMENT(brauner): Remove the volume group.
+	output, err := exec.Command("vgremove", "-f", s.pool.PoolName).CombinedOutput()
+	if err != nil {
+		return fmt.Errorf("Failed to destroy the volume group for the lvm storage pool: %s.", output)
+	}
+
+	// COMMENT(brauner): Delete the mountpoint for the storage pool.
+	poolMntPoint := getStoragePoolMountPoint(s.pool.PoolName)
+	err = os.RemoveAll(poolMntPoint)
+	if err != nil {
+		return err
+	}
 
-func (s *storageLvm) StoragePoolVolumeDelete() error {
 	return nil
 }
 
@@ -276,11 +337,106 @@ func (s *storageLvm) StoragePoolUmount() (error, bool) {
 	return nil, true
 }
 
+func (s *storageLvm) StoragePoolVolumeCreate() error {
+	tryUndo := true // CLEANER
+
+	vgName := s.pool.PoolName
+	thinPoolName := s.volume.VolumeConfig["lvm.thinpool_name"]
+	lvFsType := s.volume.VolumeConfig["block.filesystem"]
+	lvSize := s.volume.VolumeConfig["size"]
+
+	volumeType, err := storagePoolVolumeTypeNameToApiEndpoint(s.volume.VolumeType)
+	if err != nil {
+		return err
+	}
+
+	err = s.createThinLV(vgName, thinPoolName, s.volume.VolumeName, lvFsType, lvSize, volumeType)
+	if err != nil {
+		s.log.Error("LVMCreateThinLV", log.Ctx{"err": err})
+		return fmt.Errorf("Error creating LVM LV for new storage volume: %v", err)
+	}
+	defer func( /* CLEANER */ ) {
+		if tryUndo {
+			s.StoragePoolVolumeDelete()
+		}
+	}()
+
+	customPoolVolumeMntPoint := getStoragePoolVolumeMountPoint(s.pool.PoolName, s.volume.VolumeName)
+	err = os.MkdirAll(customPoolVolumeMntPoint, 0711)
+	if err != nil {
+		return err
+	}
+
+	err, _ = s.StoragePoolVolumeMount()
+	if err != nil {
+		return err
+	}
+
+	tryUndo = false
+
+	return nil
+}
+
+func (s *storageLvm) StoragePoolVolumeDelete() error {
+	customPoolVolumeMntPoint := getStoragePoolVolumeMountPoint(s.pool.PoolName, s.volume.VolumeName)
+	err, _ := s.StoragePoolVolumeUmount()
+	if err != nil {
+		return err
+	}
+
+	volumeType, err := storagePoolVolumeTypeNameToApiEndpoint(s.volume.VolumeType)
+	if err != nil {
+		return err
+	}
+
+	err = s.removeLV(s.pool.PoolName, volumeType, s.volume.VolumeName)
+	if err != nil {
+		return err
+	}
+
+	if shared.PathExists(customPoolVolumeMntPoint) {
+		err := os.Remove(customPoolVolumeMntPoint)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
 func (s *storageLvm) StoragePoolVolumeMount() (error, bool) {
+	customPoolVolumeMntPoint := getStoragePoolVolumeMountPoint(s.pool.PoolName, s.volume.VolumeName)
+	if shared.IsMountPoint(customPoolVolumeMntPoint) {
+		return nil, false
+	}
+
+	lvFsType := s.volume.VolumeConfig["block.filesystem"]
+	volumeType, err := storagePoolVolumeTypeNameToApiEndpoint(s.volume.VolumeType)
+	if err != nil {
+		return err, false
+	}
+
+	lvmVolumePath := s.getLvmDevPath(s.pool.PoolName, volumeType, s.volume.VolumeName)
+	mountOptions := s.volume.VolumeConfig["block.mount_options"]
+	err = tryMount(lvmVolumePath, customPoolVolumeMntPoint, lvFsType, 0, mountOptions)
+	if err != nil {
+		return err, false
+	}
+
 	return nil, true
 }
 
 func (s *storageLvm) StoragePoolVolumeUmount() (error, bool) {
+	customPoolVolumeMntPoint := getStoragePoolVolumeMountPoint(s.pool.PoolName, s.volume.VolumeName)
+	if !shared.IsMountPoint(customPoolVolumeMntPoint) {
+		return nil, false
+	}
+
+	err := tryUnmount(customPoolVolumeMntPoint, 0)
+	if err != nil {
+		return err, false
+	}
+
 	return nil, true
 }
 
@@ -317,124 +473,117 @@ func (s *storageLvm) StoragePoolVolumeUpdate(changedConfig []string) error {
 }
 
 func (s *storageLvm) ContainerCreate(container container) error {
-	containerName := containerNameToLVName(container.Name())
-	lvpath, err := s.createThinLV(containerName)
-	if err != nil {
-		return err
-	}
+	tryUndo := true // CLEANER
+
+	containerName := container.Name()
+	containerLvmName := containerNameToLVName(containerName)
+	thinPoolName := s.volume.VolumeConfig["lvm.thinpool_name"]
+	lvFsType := s.volume.VolumeConfig["block.filesystem"]
+	lvSize := s.volume.VolumeConfig["size"]
 
-	if err := os.MkdirAll(container.Path(), 0755); err != nil {
+	err := s.createThinLV(s.pool.PoolName, thinPoolName, containerLvmName, lvFsType, lvSize, storagePoolVolumeApiEndpointContainers)
+	if err != nil {
 		return err
 	}
+	defer func( /* CLEANER */ ) {
+		if tryUndo {
+			s.ContainerDelete(container)
+		}
+	}()
 
-	var mode os.FileMode
-	if container.IsPrivileged() {
-		mode = 0700
+	if container.IsSnapshot() {
+		containerMntPoint := getSnapshotMountPoint(s.pool.PoolName, containerName)
+		fields := strings.SplitN(containerName, shared.SnapshotDelimiter, 2)
+		sourceName := fields[0]
+		snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools", s.pool.PoolName, "snapshots", sourceName)
+		snapshotMntPointSymlink := shared.VarPath("snapshots", sourceName)
+		err = createSnapshotMountpoint(containerMntPoint, snapshotMntPointSymlinkTarget, snapshotMntPointSymlink)
 	} else {
-		mode = 0755
+		containerMntPoint := getContainerMountPoint(s.pool.PoolName, containerName)
+		containerPath := container.Path()
+		err = createContainerMountpoint(containerMntPoint, containerPath, container.IsPrivileged())
 	}
-
-	err = os.Chmod(container.Path(), mode)
 	if err != nil {
 		return err
 	}
 
-	dst := fmt.Sprintf("%s.lv", container.Path())
-	err = os.Symlink(lvpath, dst)
-	if err != nil {
-		return err
-	}
+	tryUndo = false // CLEANER
 
 	return nil
 }
 
-func (s *storageLvm) ContainerCreateFromImage(
-	container container, imageFingerprint string) error {
+func (s *storageLvm) ContainerCreateFromImage(container container, imageFingerprint string) error {
+	tryUndo := true // CLEANER
 
-	imageLVFilename := shared.VarPath(
-		"images", fmt.Sprintf("%s.lv", imageFingerprint))
+	// COMMENT(brauner): Check if the image already exists.
+	mntErrno, _ := s.ImageMount(imageFingerprint)
+	if mntErrno != nil {
+		if mntErrno != syscall.ENOENT {
+			return mntErrno
+		}
 
-	if !shared.PathExists(imageLVFilename) {
-		if err := s.ImageCreate(imageFingerprint); err != nil {
+		err := s.ImageCreate(imageFingerprint)
+		if err != nil {
 			return err
 		}
 	}
+	s.ImageUmount(imageFingerprint)
 
-	containerName := containerNameToLVName(container.Name())
-
-	lvpath, err := s.createSnapshotLV(containerName, imageFingerprint, false)
+	containerName := container.Name()
+	containerLvmName := containerNameToLVName(containerName)
+	containerLvSnapshotPath, err := s.createSnapshotLV(s.pool.PoolName, imageFingerprint, storagePoolVolumeApiEndpointImages, containerLvmName, storagePoolVolumeApiEndpointContainers, false)
 	if err != nil {
 		return err
 	}
+	defer func( /* CLEANER */ ) {
+		if tryUndo {
+			s.ContainerDelete(container)
+		}
+	}()
 
-	destPath := container.Path()
-	if err := os.MkdirAll(destPath, 0755); err != nil {
-		return fmt.Errorf("Error creating container directory: %v", err)
-	}
-
-	err = os.Chmod(destPath, 0700)
-	if err != nil {
-		return err
-	}
-
-	dst := shared.VarPath("containers", fmt.Sprintf("%s.lv", container.Name()))
-	err = os.Symlink(lvpath, dst)
+	containerMntPoint := getContainerMountPoint(s.pool.PoolName, containerName)
+	containerPath := container.Path()
+	err = createContainerMountpoint(containerMntPoint, containerPath, container.IsPrivileged())
 	if err != nil {
 		return err
 	}
 
 	// Generate a new xfs's UUID
-	fstype := daemonConfig["storage.lvm_fstype"].Get()
-	if fstype == "xfs" {
-		err := xfsGenerateNewUUID(lvpath)
+	lvFsType := s.volume.VolumeConfig["block.filesystem"]
+	if lvFsType == "xfs" {
+		err := xfsGenerateNewUUID(containerLvSnapshotPath)
 		if err != nil {
-			s.ContainerDelete(container)
 			return err
 		}
 	}
 
-	mountOptions := daemonConfig["storage.lvm_mount_options"].Get()
-	err = tryMount(lvpath, destPath, fstype, 0, mountOptions)
-	if err != nil {
-		s.ContainerDelete(container)
-		return fmt.Errorf("Error mounting snapshot LV: %v", err)
-	}
-
-	var mode os.FileMode
-	if container.IsPrivileged() {
-		mode = 0700
-	} else {
-		mode = 0755
-	}
-
-	err = os.Chmod(destPath, mode)
+	err, ourMount := s.ContainerMount(containerName, containerPath)
 	if err != nil {
 		return err
 	}
+	if ourMount {
+		defer s.ContainerUmount(containerName, containerPath)
+	}
 
 	if !container.IsPrivileged() {
-		if err = s.shiftRootfs(container); err != nil {
-			err2 := tryUnmount(destPath, 0)
-			if err2 != nil {
-				return fmt.Errorf("Error in umount: '%s' while cleaning up after error in shiftRootfs: '%s'", err2, err)
-			}
-			s.ContainerDelete(container)
-			return fmt.Errorf("Error in shiftRootfs: %v", err)
+		err := s.shiftRootfs(container)
+		if err != nil {
+			return err
 		}
 	}
 
 	err = container.TemplateApply("create")
 	if err != nil {
-		s.log.Error("Error in create template during ContainerCreateFromImage, continuing to unmount",
-			log.Ctx{"err": err})
+		s.log.Error("Error in create template during ContainerCreateFromImage, continuing to unmount", log.Ctx{"err": err})
+		return err
 	}
 
-	umounterr := tryUnmount(destPath, 0)
-	if umounterr != nil {
-		return fmt.Errorf("Error unmounting '%s' after shiftRootfs: %v", destPath, umounterr)
-	}
+	tryUndo = false // CLEANER
 
-	return err
+	return nil
 }
 
 func (s *storageLvm) ContainerCanRestore(container container, sourceContainer container) error {
@@ -442,107 +591,166 @@ func (s *storageLvm) ContainerCanRestore(container container, sourceContainer co
 }
 
 func (s *storageLvm) ContainerDelete(container container) error {
-	lvName := containerNameToLVName(container.Name())
-	if err := s.removeLV(lvName); err != nil {
-		return err
+	containerName := container.Name()
+	containerLvmName := containerNameToLVName(containerName)
+	containerMntPoint := ""
+
+	if container.IsSnapshot() {
+		containerMntPoint = getSnapshotMountPoint(s.pool.PoolName, containerName)
+	} else {
+		containerMntPoint = getContainerMountPoint(s.pool.PoolName, containerName)
+	}
+
+	// COMMENT(brauner): Make sure that the container is really unmounted at
+	// this point. Otherwise we will fail.
+	if shared.IsMountPoint(containerMntPoint) {
+		err := tryUnmount(containerMntPoint, 0)
+		if err != nil {
+			return fmt.Errorf("failed to unmount container path '%s': %s", containerMntPoint, err)
+		}
 	}
 
-	lvLinkPath := fmt.Sprintf("%s.lv", container.Path())
-	if err := os.Remove(lvLinkPath); err != nil {
+	err := s.removeLV(s.pool.PoolName, storagePoolVolumeApiEndpointContainers, containerLvmName)
+	if err != nil {
 		return err
 	}
 
-	cPath := container.Path()
-	if err := os.RemoveAll(cPath); err != nil {
-		s.log.Error("ContainerDelete: failed to remove path", log.Ctx{"cPath": cPath, "err": err})
-		return fmt.Errorf("Cleaning up %s: %s", cPath, err)
+	if container.IsSnapshot() {
+		fields := strings.SplitN(containerName, shared.SnapshotDelimiter, 2)
+		sourceName := fields[0]
+		snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools", s.pool.PoolName, "snapshots", sourceName)
+		snapshotMntPointSymlink := shared.VarPath("snapshots", sourceName)
+		err = deleteSnapshotMountpoint(containerMntPoint, snapshotMntPointSymlinkTarget, snapshotMntPointSymlink)
+	} else {
+		err = tryUnmount(containerMntPoint, 0)
+		err = deleteContainerMountpoint(containerMntPoint, container.Path(), s.GetStorageTypeName())
+	}
+	if err != nil {
+		return err
 	}
 
 	return nil
 }
 
 func (s *storageLvm) ContainerCopy(container container, sourceContainer container) error {
-	if s.isLVMContainer(sourceContainer) {
-		if err := s.createSnapshotContainer(container, sourceContainer, false); err != nil {
+	tryUndo := true // CLEANER
+
+	if sourceContainer.Storage().GetStorageType() == storageTypeLvm {
+		err := s.createSnapshotContainer(container, sourceContainer, false)
+		if err != nil {
 			s.log.Error("Error creating snapshot LV for copy", log.Ctx{"err": err})
 			return err
 		}
 	} else {
-		s.log.Info("Copy from Non-LVM container", log.Ctx{"container": container.Name(),
-			"sourceContainer": sourceContainer.Name()})
-		if err := s.ContainerCreate(container); err != nil {
+		sourceContainerName := sourceContainer.Name()
+		targetContainerName := container.Name()
+		s.log.Info("Copy from Non-LVM container", log.Ctx{"container": targetContainerName, "sourceContainer": sourceContainerName})
+		err := s.ContainerCreate(container)
+		if err != nil {
 			s.log.Error("Error creating empty container", log.Ctx{"err": err})
 			return err
 		}
+		defer func( /* CLEANER */ ) {
+			if tryUndo {
+				s.ContainerDelete(container)
+			}
+		}()
 
-		if err, _ := s.ContainerMount(container.Name(), container.Path()); err != nil {
-			s.log.Error("Error starting/mounting container", log.Ctx{"err": err, "container": container.Name()})
-			s.ContainerDelete(container)
+		targetContainerPath := container.Path()
+		err, ourTargetMount := s.ContainerMount(targetContainerName, targetContainerPath)
+		if err != nil {
+			s.log.Error("Error starting/mounting container", log.Ctx{"err": err, "container": targetContainerName})
 			return err
 		}
+		if ourTargetMount {
+			defer s.ContainerUmount(targetContainerName, targetContainerPath)
+		}
 
-		output, err := storageRsyncCopy(
-			sourceContainer.Path(),
-			container.Path())
+		sourceContainerPath := sourceContainer.Path()
+		err, ourSourceMount := sourceContainer.Storage().ContainerMount(sourceContainerName, sourceContainerPath)
+		if err != nil {
+			return err
+		}
+		if ourSourceMount {
+			defer sourceContainer.Storage().ContainerUmount(sourceContainerName, sourceContainerPath)
+		}
+
+		sourcePool := sourceContainer.Storage().ContainerPoolGet()
+		sourceContainerMntPoint := getContainerMountPoint(sourcePool, sourceContainerName)
+		targetContainerMntPoint := getContainerMountPoint(s.pool.PoolName, targetContainerName)
+		output, err := storageRsyncCopy(sourceContainerMntPoint, targetContainerMntPoint)
 		if err != nil {
 			s.log.Error("ContainerCopy: rsync failed", log.Ctx{"output": string(output)})
-			s.ContainerDelete(container)
 			return fmt.Errorf("rsync failed: %s", string(output))
 		}
+	}
 
-		if err, _ := s.ContainerUmount(container.Name(), container.Path()); err != nil {
-			return err
-		}
+	err := container.TemplateApply("copy")
+	if err != nil {
+		return err
 	}
-	return container.TemplateApply("copy")
+
+	tryUndo = false // CLEANER
+
+	return nil
 }
 
 func (s *storageLvm) ContainerMount(name string, path string) (error, bool) {
-	lvName := containerNameToLVName(name)
-	lvpath := fmt.Sprintf("/dev/%s/%s", s.pool.PoolName, lvName)
-	fstype := daemonConfig["storage.lvm_fstype"].Get()
+	containerLvmName := containerNameToLVName(name)
+	lvFsType := s.volume.VolumeConfig["block.filesystem"]
+	containerLvmPath := s.getLvmDevPath(s.pool.PoolName, storagePoolVolumeApiEndpointContainers, containerLvmName)
+	mountOptions := s.volume.VolumeConfig["block.mount_options"]
+	containerMntPoint := getContainerMountPoint(s.pool.PoolName, name)
+
+	if shared.IsMountPoint(containerMntPoint) {
+		return nil, false
+	}
 
-	mountOptions := daemonConfig["storage.lvm_mount_options"].Get()
-	err := tryMount(lvpath, path, fstype, 0, mountOptions)
+	err := tryMount(containerLvmPath, containerMntPoint, lvFsType, 0, mountOptions)
 	if err != nil {
-		return fmt.Errorf(
-			"Error mounting snapshot LV path='%s': %v",
-			path,
-			err), false
+		return fmt.Errorf("Error mounting container LV path='%s': %s", containerMntPoint, err), false
 	}
 
 	return nil, true
 }
 
 func (s *storageLvm) ContainerUmount(name string, path string) (error, bool) {
-	err := tryUnmount(path, 0)
+	containerMntPoint := getContainerMountPoint(s.pool.PoolName, name)
+
+	if !shared.IsMountPoint(containerMntPoint) {
+		return nil, false
+	}
+
+	err := tryUnmount(containerMntPoint, 0)
 	if err != nil {
-		return fmt.Errorf(
-			"failed to unmount container path '%s'.\nError: %v",
-			path,
-			err), false
+		return fmt.Errorf("failed to unmount container path '%s': %s", containerMntPoint, err), false
 	}
 
 	return nil, true
 }
 
-func (s *storageLvm) ContainerRename(
-	container container, newContainerName string) error {
+func (s *storageLvm) ContainerRename(container container, newContainerName string) error {
+	tryUndo := true // CLEANER
 
-	oldName := containerNameToLVName(container.Name())
-	newName := containerNameToLVName(newContainerName)
-	output, err := s.renameLV(oldName, newName)
-	if err != nil {
-		s.log.Error("Failed to rename a container LV",
-			log.Ctx{"oldName": oldName,
-				"newName": newName,
-				"err":     err,
-				"output":  string(output)})
+	oldName := container.Name()
+	oldLvmName := containerNameToLVName(oldName)
+	newLvmName := containerNameToLVName(newContainerName)
 
-		return fmt.Errorf("Failed to rename a container LV, oldName='%s', newName='%s', err='%s'", oldName, newName, err)
+	output, err := s.renameLV(oldLvmName, newLvmName, storagePoolVolumeApiEndpointContainers)
+	if err != nil {
+		s.log.Error("Failed to rename a container LV", log.Ctx{"oldName": oldLvmName, "newName": newLvmName, "err": err, "output": string(output)})
+		return fmt.Errorf("Failed to rename a container LV, oldName='%s', newName='%s', err='%s'", oldLvmName, newLvmName, err)
 	}
+	defer func( /* CLEANER */ ) {
+		if tryUndo {
+			s.renameLV(newLvmName, oldLvmName, storagePoolVolumeApiEndpointContainers)
+		}
+	}()
 
-	// Rename the snapshots
+	// MAYBE(TODO(brauner)): Register another cleanup function that tries to
+	// rename already renamed snapshots back to their old name when the
+	// rename fails.
 	if !container.IsSnapshot() {
 		snaps, err := container.Snapshots()
 		if err != nil {
@@ -551,64 +759,83 @@ func (s *storageLvm) ContainerRename(
 
 		for _, snap := range snaps {
 			baseSnapName := filepath.Base(snap.Name())
-			newSnapshotName := newName + shared.SnapshotDelimiter + baseSnapName
+			newSnapshotName := newContainerName + shared.SnapshotDelimiter + baseSnapName
 			err := s.ContainerRename(snap, newSnapshotName)
 			if err != nil {
 				return err
 			}
-
-			oldPathParent := filepath.Dir(snap.Path())
-			if ok, _ := shared.PathIsEmpty(oldPathParent); ok {
-				os.Remove(oldPathParent)
-			}
 		}
-	}
 
-	// Create a new symlink
-	newSymPath := fmt.Sprintf("%s.lv", containerPath(newContainerName, container.IsSnapshot()))
+		oldContainerMntPoint := getContainerMountPoint(s.pool.PoolName, oldName)
+		oldContainerMntPointSymlink := container.Path()
+		newContainerMntPoint := getContainerMountPoint(s.pool.PoolName, newContainerName)
+		newContainerMntPointSymlink := shared.VarPath("containers", newContainerName)
+		err = renameContainerMountpoint(oldContainerMntPoint, oldContainerMntPointSymlink, newContainerMntPoint, newContainerMntPointSymlink)
+		if err != nil {
+			return err
+		}
 
-	err = os.MkdirAll(filepath.Dir(containerPath(newContainerName, container.IsSnapshot())), 0700)
-	if err != nil {
-		return err
-	}
+		oldSnapshotPath := getSnapshotMountPoint(s.pool.PoolName, oldName)
+		newSnapshotPath := getSnapshotMountPoint(s.pool.PoolName, newContainerName)
+		if shared.PathExists(oldSnapshotPath) {
+			err = os.Rename(oldSnapshotPath, newSnapshotPath)
+			if err != nil {
+				return err
+			}
+		}
 
-	err = os.Symlink(fmt.Sprintf("/dev/%s/%s", s.pool.PoolName, newName), newSymPath)
-	if err != nil {
-		return err
-	}
+		oldSnapshotSymlink := shared.VarPath("snapshots", oldName)
+		newSnapshotSymlink := shared.VarPath("snapshots", newContainerName)
+		if shared.PathExists(oldSnapshotSymlink) {
+			err := os.Remove(oldSnapshotSymlink)
+			if err != nil {
+				return err
+			}
 
-	// Remove the old symlink
-	oldSymPath := fmt.Sprintf("%s.lv", container.Path())
-	err = os.Remove(oldSymPath)
-	if err != nil {
-		return err
+			err = os.Symlink(newSnapshotPath, newSnapshotSymlink)
+			if err != nil {
+				return err
+			}
+		}
 	}
 
-	// Rename the directory
-	err = os.Rename(container.Path(), containerPath(newContainerName, container.IsSnapshot()))
-	if err != nil {
-		return err
-	}
+	tryUndo = false // CLEANER
 
 	return nil
-
 }
 
-func (s *storageLvm) ContainerRestore(
-	container container, sourceContainer container) error {
+func (s *storageLvm) ContainerRestore(container container, sourceContainer container) error {
+	tryUndo := true // CLEANER
+
+	if s.pool.PoolName != sourceContainer.Storage().ContainerPoolGet() {
+		return fmt.Errorf("Containers must be on the same pool to be restored.")
+	}
+
 	srcName := containerNameToLVName(sourceContainer.Name())
 	destName := containerNameToLVName(container.Name())
-
-	err := s.removeLV(destName)
+	backupName := destName + ".back"
+	_, err := s.renameLV(destName, backupName, storagePoolVolumeApiEndpointContainers)
 	if err != nil {
-		return fmt.Errorf("Error removing LV about to be restored over: %v", err)
+		return err
 	}
+	defer func( /* CLEANER */ ) {
+		if tryUndo {
+			s.renameLV(backupName, destName, storagePoolVolumeApiEndpointContainers)
+		}
+	}()
 
-	_, err = s.createSnapshotLV(destName, srcName, false)
+	_, err = s.createSnapshotLV(s.pool.PoolName, srcName, storagePoolVolumeApiEndpointContainers, destName, storagePoolVolumeApiEndpointContainers, false)
 	if err != nil {
 		return fmt.Errorf("Error creating snapshot LV: %v", err)
 	}
 
+	err = s.removeLV(s.pool.PoolName, storagePoolVolumeApiEndpointContainers, backupName)
+	if err != nil {
+		s.log.Error("Failed to remove backup for restore. Manual intervention may be needed.", log.Ctx{"err": err})
+	}
+
+	tryUndo = false // CLEANER
+
 	return nil
 }
 
@@ -620,154 +847,165 @@ func (s *storageLvm) ContainerGetUsage(container container) (int64, error) {
 	return -1, fmt.Errorf("The LVM container backend doesn't support quotas.")
 }
 
-func (s *storageLvm) ContainerSnapshotCreate(
-	snapshotContainer container, sourceContainer container) error {
+func (s *storageLvm) ContainerSnapshotCreate(snapshotContainer container, sourceContainer container) error {
 	return s.createSnapshotContainer(snapshotContainer, sourceContainer, true)
 }
 
-func (s *storageLvm) createSnapshotContainer(
-	snapshotContainer container, sourceContainer container, readonly bool) error {
+func (s *storageLvm) createSnapshotContainer(snapshotContainer container, sourceContainer container, readonly bool) error {
+	tryUndo := true // CLEANER
 
-	srcName := containerNameToLVName(sourceContainer.Name())
-	destName := containerNameToLVName(snapshotContainer.Name())
-	shared.LogDebug(
-		"Creating snapshot",
-		log.Ctx{"srcName": srcName, "destName": destName})
+	sourceContainerName := sourceContainer.Name()
+	targetContainerName := snapshotContainer.Name()
+	sourceContainerLvmName := containerNameToLVName(sourceContainerName)
+	targetContainerLvmName := containerNameToLVName(targetContainerName)
+	shared.LogDebug("Creating snapshot", log.Ctx{"srcName": sourceContainerName, "destName": targetContainerName})
 
-	lvpath, err := s.createSnapshotLV(destName, srcName, readonly)
+	_, err := s.createSnapshotLV(s.pool.PoolName, sourceContainerLvmName, storagePoolVolumeApiEndpointContainers, targetContainerLvmName, storagePoolVolumeApiEndpointContainers, readonly)
 	if err != nil {
-		return fmt.Errorf("Error creating snapshot LV: %v", err)
-	}
-
-	destPath := snapshotContainer.Path()
-	if err := os.MkdirAll(destPath, 0755); err != nil {
-		return fmt.Errorf("Error creating container directory: %v", err)
+		return fmt.Errorf("Error creating snapshot LV: %s", err)
 	}
+	defer func( /* CLEANER */ ) {
+		if tryUndo {
+			s.ContainerDelete(snapshotContainer)
+		}
+	}()
 
-	var mode os.FileMode
-	if snapshotContainer.IsPrivileged() {
-		mode = 0700
+	targetContainerMntPoint := ""
+	targetContainerPath := snapshotContainer.Path()
+	targetIsSnapshot := snapshotContainer.IsSnapshot()
+	if targetIsSnapshot {
+		targetContainerMntPoint = getSnapshotMountPoint(s.pool.PoolName, targetContainerName)
+		sourceFields := strings.SplitN(sourceContainerName, shared.SnapshotDelimiter, 2)
+		sourceName := sourceFields[0]
+		sourcePool := sourceContainer.Storage().ContainerPoolGet()
+		snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools", sourcePool, "snapshots", sourceName)
+		snapshotMntPointSymlink := shared.VarPath("snapshots", sourceName)
+		err = createSnapshotMountpoint(targetContainerMntPoint, snapshotMntPointSymlinkTarget, snapshotMntPointSymlink)
 	} else {
-		mode = 0755
+		targetContainerMntPoint = getContainerMountPoint(s.pool.PoolName, targetContainerName)
+		err = createContainerMountpoint(targetContainerMntPoint, targetContainerPath, snapshotContainer.IsPrivileged())
 	}
-
-	err = os.Chmod(destPath, mode)
 	if err != nil {
 		return err
 	}
 
-	dest := fmt.Sprintf("%s.lv", snapshotContainer.Path())
-	err = os.Symlink(lvpath, dest)
-	if err != nil {
-		return err
-	}
+	tryUndo = false // CLEANER
 
 	return nil
 }
 
-func (s *storageLvm) ContainerSnapshotDelete(
-	snapshotContainer container) error {
-
+func (s *storageLvm) ContainerSnapshotDelete(snapshotContainer container) error {
 	err := s.ContainerDelete(snapshotContainer)
 	if err != nil {
 		return fmt.Errorf("Error deleting snapshot %s: %s", snapshotContainer.Name(), err)
 	}
 
-	oldPathParent := filepath.Dir(snapshotContainer.Path())
-	if ok, _ := shared.PathIsEmpty(oldPathParent); ok {
-		os.Remove(oldPathParent)
-	}
 	return nil
 }
 
-func (s *storageLvm) ContainerSnapshotRename(
-	snapshotContainer container, newContainerName string) error {
-	oldName := containerNameToLVName(snapshotContainer.Name())
-	newName := containerNameToLVName(newContainerName)
-	oldPath := snapshotContainer.Path()
-	oldSymPath := fmt.Sprintf("%s.lv", oldPath)
-	newPath := containerPath(newContainerName, true)
-	newSymPath := fmt.Sprintf("%s.lv", newPath)
+func (s *storageLvm) ContainerSnapshotRename(snapshotContainer container, newContainerName string) error {
+	tryUndo := true // CLEANER
 
-	// Rename the LV
-	output, err := s.renameLV(oldName, newName)
-	if err != nil {
-		s.log.Error("Failed to rename a snapshot LV",
-			log.Ctx{"oldName": oldName, "newName": newName, "err": err, "output": string(output)})
-		return fmt.Errorf("Failed to rename a container LV, oldName='%s', newName='%s', err='%s'", oldName, newName, err)
-	}
+	oldName := snapshotContainer.Name()
+	oldLvmName := containerNameToLVName(oldName)
+	newLvmName := containerNameToLVName(newContainerName)
 
-	// Delete the symlink
-	err = os.Remove(oldSymPath)
+	output, err := s.renameLV(oldLvmName, newLvmName, storagePoolVolumeApiEndpointContainers)
 	if err != nil {
-		return fmt.Errorf("Failed to remove old symlink: %s", err)
+		s.log.Error("Failed to rename a snapshot LV", log.Ctx{"oldName": oldLvmName, "newName": newLvmName, "err": err, "output": string(output)})
+		return fmt.Errorf("Failed to rename a container LV, oldName='%s', newName='%s', err='%s'", oldLvmName, newLvmName, err)
 	}
+	defer func( /* CLEANER */ ) {
+		if tryUndo {
+			s.renameLV(newLvmName, oldLvmName, storagePoolVolumeApiEndpointContainers)
+		}
+	}()
 
-	// Create the symlink
-	err = os.Symlink(fmt.Sprintf("/dev/%s/%s", s.pool.PoolName, newName), newSymPath)
+	oldSnapshotMntPoint := getSnapshotMountPoint(s.pool.PoolName, oldName)
+	newSnapshotMntPoint := getSnapshotMountPoint(s.pool.PoolName, newContainerName)
+	err = os.Rename(oldSnapshotMntPoint, newSnapshotMntPoint)
 	if err != nil {
-		return fmt.Errorf("Failed to create symlink: %s", err)
+		return err
 	}
 
-	// Rename the mount point
-	err = os.Rename(oldPath, newPath)
-	if err != nil {
-		return fmt.Errorf("Failed to rename mountpoint: %s", err)
-	}
+	tryUndo = false // CLEANER
 
 	return nil
 }
 
 func (s *storageLvm) ContainerSnapshotStart(container container) error {
-	srcName := containerNameToLVName(container.Name())
-	destName := containerNameToLVName(container.Name() + "/rw")
+	tryUndo := true // CLEANER
+
+	sourceName := container.Name()
+	targetName := sourceName + "/rw"
+	sourceLvmName := containerNameToLVName(sourceName)
+	targetLvmName := containerNameToLVName(targetName)
 
-	shared.LogDebug(
-		"Creating snapshot",
-		log.Ctx{"srcName": srcName, "destName": destName})
+	shared.LogDebug("Creating snapshot", log.Ctx{"srcName": sourceLvmName, "destName": targetLvmName})
 
-	lvpath, err := s.createSnapshotLV(destName, srcName, false)
+	lvpath, err := s.createSnapshotLV(s.pool.PoolName, sourceLvmName, storagePoolVolumeApiEndpointContainers, targetLvmName, storagePoolVolumeApiEndpointContainers, false)
 	if err != nil {
-		return fmt.Errorf("Error creating snapshot LV: %v", err)
+		return fmt.Errorf("Error creating snapshot LV: %s", err)
 	}
-
-	destPath := container.Path()
-	if !shared.PathExists(destPath) {
-		if err := os.MkdirAll(destPath, 0755); err != nil {
-			return fmt.Errorf("Error creating container directory: %v", err)
+	defer func( /* CLEANER */ ) {
+		if tryUndo {
+			s.removeLV(s.pool.PoolName, storagePoolVolumeApiEndpointContainers, targetLvmName)
 		}
+	}()
+
+	tmpContainerMntPoint := getSnapshotMountPoint(s.pool.PoolName, targetName)
+	fields := strings.SplitN(sourceName, shared.SnapshotDelimiter, 2)
+	destName := fields[0]
+	snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools", s.pool.PoolName, "snapshots", destName)
+	snapshotMntPointSymlink := shared.VarPath("snapshots", destName)
+	err = createSnapshotMountpoint(tmpContainerMntPoint, snapshotMntPointSymlinkTarget, snapshotMntPointSymlink)
+	if err != nil {
+		return err
 	}
+	defer func( /* CLEANER */ ) {
+		if tryUndo {
+			deleteSnapshotMountpoint(tmpContainerMntPoint, snapshotMntPointSymlinkTarget, snapshotMntPointSymlink)
+		}
+	}()
 
+	lvFsType := s.volume.VolumeConfig["block.filesystem"]
+	containerLvmPath := s.getLvmDevPath(s.pool.PoolName, storagePoolVolumeApiEndpointContainers, targetLvmName)
+	mountOptions := s.volume.VolumeConfig["block.mount_options"]
+	containerMntPoint := getSnapshotMountPoint(s.pool.PoolName, sourceName)
 	// Generate a new xfs's UUID
-	fstype := daemonConfig["storage.lvm_fstype"].Get()
-	if fstype == "xfs" {
+	if lvFsType == "xfs" {
 		err := xfsGenerateNewUUID(lvpath)
 		if err != nil {
-			s.ContainerDelete(container)
 			return err
 		}
 	}
 
-	mountOptions := daemonConfig["storage.lvm_mount_options"].Get()
-	err = tryMount(lvpath, container.Path(), fstype, 0, mountOptions)
-	if err != nil {
-		return fmt.Errorf(
-			"Error mounting snapshot LV path='%s': %v",
-			container.Path(),
-			err)
+	if !shared.IsMountPoint(containerMntPoint) {
+		err = tryMount(containerLvmPath, containerMntPoint, lvFsType, 0, mountOptions)
+		if err != nil {
+			return fmt.Errorf("Error mounting snapshot LV path='%s': %s", containerMntPoint, err)
+		}
 	}
 
+	tryUndo = false // CLEANER
+
 	return nil
 }
 
 func (s *storageLvm) ContainerSnapshotStop(container container) error {
-	err, _ := s.ContainerUmount(container.Name(), container.Path())
-	if err != nil {
-		return err
+	name := container.Name()
+	snapshotMntPoint := getSnapshotMountPoint(s.pool.PoolName, name)
+
+	if shared.IsMountPoint(snapshotMntPoint) {
+		err := tryUnmount(snapshotMntPoint, 0)
+		if err != nil {
+			return err
+		}
 	}
 
-	lvName := containerNameToLVName(container.Name() + "/rw")
-	if err := s.removeLV(lvName); err != nil {
+	containerLvmName := containerNameToLVName(name + "/rw")
+	err := s.removeLV(s.pool.PoolName, storagePoolVolumeApiEndpointContainers, containerLvmName)
+	if err != nil {
 		return err
 	}
 
@@ -779,267 +1017,264 @@ func (s *storageLvm) ContainerSnapshotCreateEmpty(snapshotContainer container) e
 }
 
 func (s *storageLvm) ImageCreate(fingerprint string) error {
-	finalName := shared.VarPath("images", fingerprint)
+	tryUndo := true // CLEANER
 
-	lvpath, err := s.createThinLV(fingerprint)
+	vgName := s.pool.PoolName
+	thinPoolName := s.volume.VolumeConfig["lvm.thinpool_name"]
+	lvFsType := s.volume.VolumeConfig["block.filesystem"]
+	lvSize := s.volume.VolumeConfig["size"]
+	err := s.createThinLV(vgName, thinPoolName, fingerprint, lvFsType, lvSize, storagePoolVolumeApiEndpointImages)
 	if err != nil {
 		s.log.Error("LVMCreateThinLV", log.Ctx{"err": err})
 		return fmt.Errorf("Error Creating LVM LV for new image: %v", err)
 	}
+	defer func( /* CLEANER */ ) {
+		if tryUndo {
+			s.ImageDelete(fingerprint)
+		}
+	}()
 
-	dst := shared.VarPath("images", fmt.Sprintf("%s.lv", fingerprint))
-	err = os.Symlink(lvpath, dst)
-	if err != nil {
-		return err
+	// COMMENT(brauner): Create image mountpoint.
+	imageMntPoint := getImageMountPoint(s.pool.PoolName, fingerprint)
+	if !shared.PathExists(imageMntPoint) {
+		err := os.MkdirAll(imageMntPoint, 0700)
+		if err != nil {
+			return err
+		}
 	}
 
-	tempLVMountPoint, err := ioutil.TempDir(shared.VarPath("images"), "tmp_lv_mnt")
+	err, _ = s.ImageMount(fingerprint)
 	if err != nil {
 		return err
 	}
-	defer func() {
-		if err := os.RemoveAll(tempLVMountPoint); err != nil {
-			s.log.Error("Deleting temporary LVM mount point", log.Ctx{"err": err})
-		}
-	}()
 
-	fstype := daemonConfig["storage.lvm_fstype"].Get()
-	mountOptions := daemonConfig["storage.lvm_mount_options"].Get()
-	err = tryMount(lvpath, tempLVMountPoint, fstype, 0, mountOptions)
+	imagePath := shared.VarPath("images", fingerprint)
+	err = unpackImage(s.d, imagePath, imageMntPoint, storageTypeLvm)
 	if err != nil {
-		shared.LogInfof("Error mounting image LV for unpacking: %v", err)
-		return fmt.Errorf("Error mounting image LV: %v", err)
+		return err
 	}
 
-	unpackErr := unpackImage(s.d, finalName, tempLVMountPoint, storageTypeLvm)
+	s.ImageUmount(fingerprint)
 
-	err = tryUnmount(tempLVMountPoint, 0)
-	if err != nil {
-		s.log.Warn("could not unmount LV. Will not remove",
-			log.Ctx{"lvpath": lvpath, "mountpoint": tempLVMountPoint, "err": err})
-		if unpackErr == nil {
-			return err
-		}
-
-		return fmt.Errorf(
-			"Error unmounting '%s' during cleanup of error %v",
-			tempLVMountPoint, unpackErr)
-	}
-
-	if unpackErr != nil {
-		s.removeLV(fingerprint)
-		return unpackErr
-	}
+	tryUndo = false // CLEANER
 
 	return nil
 }
 
 func (s *storageLvm) ImageDelete(fingerprint string) error {
-	err := s.removeLV(fingerprint)
+	err, _ := s.ImageUmount(fingerprint)
 	if err != nil {
 		return err
 	}
 
-	lvsymlink := fmt.Sprintf(
-		"%s.lv", shared.VarPath("images", fingerprint))
-	err = os.Remove(lvsymlink)
+	err = s.removeLV(s.pool.PoolName, storagePoolVolumeApiEndpointImages, fingerprint)
 	if err != nil {
-		return fmt.Errorf(
-			"Failed to remove symlink to deleted image LV: '%s': %v", lvsymlink, err)
+		return err
+	}
+
+	imageMntPoint := getImageMountPoint(s.pool.PoolName, fingerprint)
+	if shared.PathExists(imageMntPoint) {
+		err := os.Remove(imageMntPoint)
+		if err != nil {
+			return err
+		}
 	}
 
 	return nil
 }
 
 func (s *storageLvm) ImageMount(fingerprint string) (error, bool) {
-	return nil, true
-}
-
-func (s *storageLvm) ImageUmount(fingerprint string) (error, bool) {
-	return nil, true
-}
+	imageMntPoint := getImageMountPoint(s.pool.PoolName, fingerprint)
+	if shared.IsMountPoint(imageMntPoint) {
+		return nil, false
+	}
 
-func (s *storageLvm) createDefaultThinPool() (string, error) {
-	thinPoolName := daemonConfig["storage.lvm_thinpool_name"].Get()
-	isRecent, err := s.lvmVersionIsAtLeast("2.02.99")
-	if err != nil {
-		return "", fmt.Errorf("Error checking LVM version: %v", err)
+	// COMMENT(brauner): Shouldn't happen.
+	lvmFstype := s.volume.VolumeConfig["block.filesystem"]
+	if lvmFstype == "" {
+		return fmt.Errorf("No filesystem type specified."), false
 	}
 
-	// Create the thin pool
-	var output []byte
-	if isRecent {
-		output, err = tryExec(
-			"lvcreate",
-			"--poolmetadatasize", "1G",
-			"-l", "100%FREE",
-			"--thinpool",
-			fmt.Sprintf("%s/%s", s.pool.PoolName, thinPoolName))
-	} else {
-		output, err = tryExec(
-			"lvcreate",
-			"--poolmetadatasize", "1G",
-			"-L", "1G",
-			"--thinpool",
-			fmt.Sprintf("%s/%s", s.pool.PoolName, thinPoolName))
+	lvmVolumePath := s.getLvmDevPath(s.pool.PoolName, storagePoolVolumeApiEndpointImages, fingerprint)
+	lvmMountOptions := s.volume.VolumeConfig["block.mount_options"]
+	// COMMENT(brauner): Shouldn't be necessary since it should be
+	// validated in the config checks.
+	if lvmFstype == "ext4" && lvmMountOptions == "" {
+		lvmMountOptions = "discard"
 	}
 
+	err := tryMount(lvmVolumePath, imageMntPoint, lvmFstype, 0, lvmMountOptions)
 	if err != nil {
-		s.log.Error(
-			"Could not create thin pool",
-			log.Ctx{
-				"name":   thinPoolName,
-				"err":    err,
-				"output": string(output)})
-
-		return "", fmt.Errorf(
-			"Could not create LVM thin pool named %s", thinPoolName)
+		shared.LogInfof("Error mounting image LV: %s", err)
+		return fmt.Errorf("Error mounting image LV: %v", err), false
 	}
 
-	if !isRecent {
-		// Grow it to the maximum VG size (two step process required by old LVM)
-		output, err = tryExec(
-			"lvextend",
-			"--alloc", "anywhere",
-			"-l", "100%FREE",
-			fmt.Sprintf("%s/%s", s.pool.PoolName, thinPoolName))
+	return nil, true
+}
 
-		if err != nil {
-			s.log.Error(
-				"Could not grow thin pool",
-				log.Ctx{
-					"name":   thinPoolName,
-					"err":    err,
-					"output": string(output)})
-
-			return "", fmt.Errorf(
-				"Could not grow LVM thin pool named %s", thinPoolName)
-		}
+func (s *storageLvm) ImageUmount(fingerprint string) (error, bool) {
+	imageMntPoint := getImageMountPoint(s.pool.PoolName, fingerprint)
+	if !shared.IsMountPoint(imageMntPoint) {
+		return nil, false
 	}
 
-	return thinPoolName, nil
-}
+	err := tryUnmount(imageMntPoint, 0)
+	if err != nil {
+		return err, false
+	}
 
-func (s *storageLvm) createThinLV(lvname string) (string, error) {
-	var err error
+	return nil, true
+}
 
-	vgname := daemonConfig["storage.lvm_vg_name"].Get()
-	poolname := daemonConfig["storage.lvm_thinpool_name"].Get()
-	exists, err := storageLVMThinpoolExists(vgname, poolname)
+func (s *storageLvm) createThinLV(vgName string, thinPoolName string, lvName string, lvFsType string, lvSize string, volumeType string) error {
+	exists, err := storageLVMThinpoolExists(vgName, thinPoolName)
 	if err != nil {
-		return "", err
+		return err
 	}
 
 	if !exists {
-		poolname, err = s.createDefaultThinPool()
+		err := s.createDefaultThinPool(vgName, thinPoolName, lvName, lvFsType)
 		if err != nil {
-			return "", fmt.Errorf("Error creating LVM thin pool: %v", err)
+			return err
 		}
 
-		err = storageLVMValidateThinPoolName(s.d, "", poolname)
+		err = storageLVMValidateThinPoolName(s.d, vgName, thinPoolName)
 		if err != nil {
 			s.log.Error("Setting thin pool name", log.Ctx{"err": err})
-			return "", fmt.Errorf("Error setting LVM thin pool config: %v", err)
+			return fmt.Errorf("Error setting LVM thin pool config: %v", err)
 		}
 	}
 
-	lvSize := daemonConfig["storage.lvm_volume_size"].Get()
-
+	lvmThinPoolPath := fmt.Sprintf("%s/%s", vgName, thinPoolName)
+	lvmPoolVolumeName := s.getPrefixedLvName(volumeType, lvName)
 	output, err := tryExec(
 		"lvcreate",
 		"--thin",
-		"-n", lvname,
-		"--virtualsize", lvSize,
-		fmt.Sprintf("%s/%s", s.pool.PoolName, poolname))
+		"-n", lvmPoolVolumeName,
+		"--virtualsize", lvSize+"B", lvmThinPoolPath)
 	if err != nil {
-		s.log.Error("Could not create LV", log.Ctx{"lvname": lvname, "output": string(output)})
-		return "", fmt.Errorf("Could not create thin LV named %s", lvname)
+		s.log.Error("Could not create LV", log.Ctx{"lvname": lvmPoolVolumeName, "output": string(output)})
+		return fmt.Errorf("Could not create thin LV named %s", lvmPoolVolumeName)
 	}
 
-	lvpath := fmt.Sprintf("/dev/%s/%s", s.pool.PoolName, lvname)
-
-	fstype := daemonConfig["storage.lvm_fstype"].Get()
-	switch fstype {
+	fsPath := s.getLvmDevPath(vgName, volumeType, lvName)
+	switch lvFsType {
 	case "xfs":
-		output, err = tryExec(
-			"mkfs.xfs",
-			lvpath)
+		output, err = tryExec("mkfs.xfs", fsPath)
 	default:
 		// default = ext4
 		output, err = tryExec(
 			"mkfs.ext4",
 			"-E", "nodiscard,lazy_itable_init=0,lazy_journal_init=0",
-			lvpath)
+			fsPath)
 	}
 
 	if err != nil {
 		s.log.Error("Filesystem creation failed", log.Ctx{"output": string(output)})
-		return "", fmt.Errorf("Error making filesystem on image LV: %v", err)
+		return fmt.Errorf("Error making filesystem on image LV: %v", err)
 	}
 
-	return lvpath, nil
+	return nil
 }
 
-func (s *storageLvm) removeLV(lvname string) error {
-	var err error
+func (s *storageLvm) createDefaultThinPool(vgName string, thinPoolName string, lvName string, lvFsType string) error {
+	isRecent, err := s.lvmVersionIsAtLeast("2.02.99")
+	if err != nil {
+		return fmt.Errorf("Error checking LVM version: %s", err)
+	}
+
+	// Create the thin pool
+	lvmThinPool := fmt.Sprintf("%s/%s", vgName, thinPoolName)
 	var output []byte
+	if isRecent {
+		output, err = tryExec(
+			"lvcreate",
+			"--poolmetadatasize", "1G",
+			"-l", "100%FREE",
+			"--thinpool", lvmThinPool)
+	} else {
+		output, err = tryExec(
+			"lvcreate",
+			"--poolmetadatasize", "1G",
+			"-L", "1G",
+			"--thinpool", lvmThinPool)
+	}
+
+	if err != nil {
+		s.log.Error("Could not create thin pool", log.Ctx{"name": thinPoolName, "err": err, "output": string(output)})
+		return fmt.Errorf("Could not create LVM thin pool named %s", thinPoolName)
+	}
+
+	if !isRecent {
+		// Grow it to the maximum VG size (two step process required by old LVM)
+		output, err = tryExec("lvextend", "--alloc", "anywhere", "-l", "100%FREE", lvmThinPool)
 
-	output, err = tryExec(
-		"lvremove", "-f", fmt.Sprintf("%s/%s", s.pool.PoolName, lvname))
+		if err != nil {
+			s.log.Error("Could not grow thin pool", log.Ctx{"name": thinPoolName, "err": err, "output": string(output)})
+			return fmt.Errorf("Could not grow LVM thin pool named %s", thinPoolName)
+		}
+	}
+
+	return nil
+}
+
+func (s *storageLvm) removeLV(vgName string, volumeType string, lvName string) error {
+	lvmVolumePath := s.getLvmDevPath(vgName, volumeType, lvName)
+	output, err := tryExec("lvremove", "-f", lvmVolumePath)
 
 	if err != nil {
-		s.log.Error("Could not remove LV", log.Ctx{"lvname": lvname, "output": string(output)})
-		return fmt.Errorf("Could not remove LV named %s", lvname)
+		s.log.Error("Could not remove LV", log.Ctx{"lvname": lvName, "output": string(output)})
+		return fmt.Errorf("Could not remove LV named %s", lvName)
 	}
 
 	return nil
 }
 
-func (s *storageLvm) createSnapshotLV(lvname string, origlvname string, readonly bool) (string, error) {
-	s.log.Debug("in createSnapshotLV:", log.Ctx{"lvname": lvname, "dev string": fmt.Sprintf("/dev/%s/%s", s.pool.PoolName, origlvname)})
+func (s *storageLvm) createSnapshotLV(vgName string, origLvName string, origVolumeType string, lvName string, volumeType string, readonly bool) (string, error) {
+	sourceLvmVolumePath := s.getLvmDevPath(vgName, origVolumeType, origLvName)
+	s.log.Debug("in createSnapshotLV:", log.Ctx{"lvname": lvName, "dev string": sourceLvmVolumePath})
 	isRecent, err := s.lvmVersionIsAtLeast("2.02.99")
 	if err != nil {
 		return "", fmt.Errorf("Error checking LVM version: %v", err)
 	}
+
+	lvmPoolVolumeName := s.getPrefixedLvName(volumeType, lvName)
 	var output []byte
 	if isRecent {
 		output, err = tryExec(
 			"lvcreate",
 			"-kn",
-			"-n", lvname,
-			"-s", fmt.Sprintf("/dev/%s/%s", s.pool.PoolName, origlvname))
+			"-n", lvmPoolVolumeName,
+			"-s", sourceLvmVolumePath)
 	} else {
 		output, err = tryExec(
 			"lvcreate",
-			"-n", lvname,
-			"-s", fmt.Sprintf("/dev/%s/%s", s.pool.PoolName, origlvname))
+			"-n", lvmPoolVolumeName,
+			"-s", sourceLvmVolumePath)
 	}
 	if err != nil {
-		s.log.Error("Could not create LV snapshot", log.Ctx{"lvname": lvname, "origlvname": origlvname, "output": string(output)})
-		return "", fmt.Errorf("Could not create snapshot LV named %s", lvname)
+		s.log.Error("Could not create LV snapshot", log.Ctx{"lvname": lvName, "origlvname": origLvName, "output": string(output)})
+		return "", fmt.Errorf("Could not create snapshot LV named %s", lvName)
 	}
 
-	snapshotFullName := fmt.Sprintf("/dev/%s/%s", s.pool.PoolName, lvname)
-
+	targetLvmVolumePath := s.getLvmDevPath(vgName, volumeType, lvName)
 	if readonly {
-		output, err = tryExec("lvchange", "-ay", "-pr", snapshotFullName)
+		output, err = tryExec("lvchange", "-ay", "-pr", targetLvmVolumePath)
 	} else {
-		output, err = tryExec("lvchange", "-ay", snapshotFullName)
+		output, err = tryExec("lvchange", "-ay", targetLvmVolumePath)
 	}
 
 	if err != nil {
-		return "", fmt.Errorf("Could not activate new snapshot '%s': %v\noutput:%s", lvname, err, string(output))
+		return "", fmt.Errorf("Could not activate new snapshot '%s': %v\noutput:%s", lvName, err, string(output))
 	}
 
-	return snapshotFullName, nil
-}
-
-func (s *storageLvm) isLVMContainer(container container) bool {
-	return shared.PathExists(fmt.Sprintf("%s.lv", container.Path()))
+	return targetLvmVolumePath, nil
 }
 
-func (s *storageLvm) renameLV(oldName string, newName string) (string, error) {
-	output, err := tryExec("lvrename", s.pool.PoolName, oldName, newName)
+func (s *storageLvm) renameLV(oldName string, newName string, volumeType string) (string, error) {
+	oldLvmName := s.getPrefixedLvName(volumeType, oldName)
+	newLvmName := s.getPrefixedLvName(volumeType, newName)
+	output, err := tryExec("lvrename", s.pool.PoolName, oldLvmName, newLvmName)
 	return string(output), err
 }
 

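A note for reviewers: the hunks above call s.getPrefixedLvName() and
s.getLvmDevPath() without showing their bodies. A minimal sketch consistent
with the lvrename and /dev-path call sites later in the series (patch 53) --
not the verbatim implementation -- looks like this:

package main

import "fmt"

// Sketch only: LVs are namespaced per volume type by prefixing the API
// endpoint name, e.g. "containers_c1" or "images_<fingerprint>".
func getPrefixedLvName(volumeType string, lvName string) string {
	return fmt.Sprintf("%s_%s", volumeType, lvName)
}

// Device nodes then follow /dev/<vg>/<type>_<name>.
func getLvmDevPath(vgName string, volumeType string, lvName string) string {
	return fmt.Sprintf("/dev/%s/%s_%s", vgName, volumeType, lvName)
}

func main() {
	// Prints: /dev/pool0/containers_c1 ("pool0" and "c1" are placeholders)
	fmt.Println(getLvmDevPath("pool0", "containers", "c1"))
}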
From 7240b8591479a5a96fc2183139baa3cb6497d214 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Thu, 26 Jan 2017 17:12:36 +0100
Subject: [PATCH 49/63] containers: validate configuration

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/container.go | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/lxd/container.go b/lxd/container.go
index 4acc8db..2af62cb 100644
--- a/lxd/container.go
+++ b/lxd/container.go
@@ -674,14 +674,20 @@ func containerCreateInternal(d *Daemon, args containerArgs) (container, error) {
 
 	// Get the ID of the storage pool to which the storage volume we will
 	// create for the container will be attached to.
-	poolID, err := dbStoragePoolGetID(d.db, args.StoragePool)
+	poolID, pool, err := dbStoragePoolGet(d.db, args.StoragePool)
+	if err != nil {
+		return nil, err
+	}
+
+	// Validate the requested storage volume configuration.
+	volumeConfig := map[string]string{}
+	err = storageVolumeValidateConfig(args.StoragePool, volumeConfig, pool)
 	if err != nil {
 		return nil, err
 	}
 
 	// Create a new database entry for the container's storage volume we
 	// will create on the storage pool.
-	var volumeConfig map[string]string
 	_, err = dbStoragePoolVolumeCreate(d.db, args.Name, storagePoolVolumeTypeContainer, poolID, volumeConfig)
 	if err != nil {
 		return nil, err

From 89512d0f3fd0b5a559e5f55359bbdd374bcaa7b9 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Thu, 26 Jan 2017 17:12:56 +0100
Subject: [PATCH 50/63] images: validate configuration

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/images.go | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/lxd/images.go b/lxd/images.go
index a283c28..8177dd4 100644
--- a/lxd/images.go
+++ b/lxd/images.go
@@ -642,13 +642,19 @@ func imageCreateInPool(d *Daemon, info *api.Image) error {
 	}
 
 	// Get the ID of the storage pool to which the image will be attached.
-	poolID, err := dbStoragePoolGetID(d.db, info.StoragePool)
+	poolID, pool, err := dbStoragePoolGet(d.db, info.StoragePool)
+	if err != nil {
+		return err
+	}
+
+	// Validate the requested storage volume configuration.
+	volumeConfig := map[string]string{}
+	err = storageVolumeValidateConfig(info.StoragePool, volumeConfig, pool)
 	if err != nil {
 		return err
 	}
 
 	// Create a db entry for the storage volume of the image.
-	var volumeConfig map[string]string
 	_, err = dbStoragePoolVolumeCreate(d.db, info.Fingerprint, storagePoolVolumeTypeImage, poolID, volumeConfig)
 	if err != nil {
 		return err

From d05473a838c53305cda11e33d92abf1b06ff7469 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Sun, 29 Jan 2017 20:54:19 +0100
Subject: [PATCH 51/63] lxd/container_lxc: start storage earlier

When either of the following two conditions holds:
- the storage pool got (externally) unmounted and the storage type in question
  only requires the pool to be mounted (e.g. btrfs)
- the container's storage volume got (externally) unmounted
we will be unable to start any containers, because liblxc, when saving the
config file, also checks whether the c->config_path for the container exists.
By default, c->config_path in liblxc is set to the containers path, e.g.
/var/lib/lxd/containers, which is then combined with the container's name. So
liblxc checks whether /var/lib/lxd/containers/<container_name> exists, which
isn't the case e.g. with btrfs, where this path only exists while the pool is
mounted.

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/container_lxc.go | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/lxd/container_lxc.go b/lxd/container_lxc.go
index d30b333..c65a0cf 100644
--- a/lxd/container_lxc.go
+++ b/lxd/container_lxc.go
@@ -1847,20 +1847,21 @@ func (c *containerLXC) startCommon() (string, error) {
 		}
 	}
 
-	// Generate the LXC config
-	configPath := filepath.Join(c.LogPath(), "lxc.conf")
-	err = c.c.SaveConfigFile(configPath)
+	// Storage is guaranteed to be mountable now.
+	err = c.StorageStart()
 	if err != nil {
-		os.Remove(configPath)
 		return "", err
 	}
 
-	// Update the backup.yaml file (as storage is guaranteed to be mountable now)
-	err = c.StorageStart()
+	// Generate the LXC config
+	configPath := filepath.Join(c.LogPath(), "lxc.conf")
+	err = c.c.SaveConfigFile(configPath)
 	if err != nil {
+		os.Remove(configPath)
 		return "", err
 	}
 
+	// Update the backup.yaml file
 	err = writeBackupFile(c)
 	if err != nil {
 		c.StorageStop()

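To illustrate the constraint (a sketch using go-lxc directly; container name
and paths are placeholders, not taken from the patch):

package main

import (
	"fmt"

	"gopkg.in/lxc/go-lxc.v2"
)

func main() {
	// liblxc resolves the container relative to the configured lxcpath.
	c, err := lxc.NewContainer("c1", "/var/lib/lxd/containers")
	if err != nil {
		fmt.Println(err)
		return
	}

	// If /var/lib/lxd/containers/c1 doesn't exist yet (e.g. an unmounted
	// btrfs pool), saving the config fails. Hence StorageStart() has to
	// run before SaveConfigFile() in startCommon().
	err = c.SaveConfigFile("/tmp/c1-lxc.conf")
	if err != nil {
		fmt.Println(err)
	}
}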
From 8330ccc5f99f28e706fe1f43e5fff3d7194c0148 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Tue, 31 Jan 2017 23:14:40 +0100
Subject: [PATCH 52/63] lxd/daemon: apply patches before storage starts

In order to properly upgrade from a pre-storage-api to a storage-api LXD
instance we need to be able to apply patches before storage starts up.

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/daemon.go | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/lxd/daemon.go b/lxd/daemon.go
index 18b2dc3..2b709c0 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -811,18 +811,18 @@ func (d *Daemon) Init() error {
 	}
 
 	if !d.MockMode {
-		/* Setup the storage driver */
-		err = d.SetupStorageDriver()
-		if err != nil {
-			return fmt.Errorf("Failed to setup storage: %s", err)
-		}
-
 		/* Apply all patches */
 		err = patchesApplyAll(d)
 		if err != nil {
 			return err
 		}
 
+		/* Setup the storage driver */
+		err = d.SetupStorageDriver()
+		if err != nil {
+			return fmt.Errorf("Failed to setup storage: %s", err)
+		}
+
 		/* Setup the networks */
 		err = networkStartup(d)
 		if err != nil {

From df54dee38a2cf34de75834d15ff28dbfe1675b7d Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Tue, 31 Jan 2017 23:18:02 +0100
Subject: [PATCH 53/63] lxd/patches: implement patchStorageApi()

This commit implements upgrading from a pre-storage-api to a storage-api LXD
instance. Upgrading from btrfs, dir, lvm, and zfs backends is supported.

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/patches.go | 842 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 shared/util.go |   9 +
 2 files changed, 851 insertions(+)

diff --git a/lxd/patches.go b/lxd/patches.go
index f3dcc97..708d4a6 100644
--- a/lxd/patches.go
+++ b/lxd/patches.go
@@ -1,7 +1,10 @@
 package main
 
 import (
+	"fmt"
 	"os"
+	"os/exec"
+	"strconv"
 	"strings"
 
 	"github.com/lxc/lxd/shared"
@@ -30,6 +33,7 @@ var patches = []patch{
 	{name: "invalid_profile_names", run: patchInvalidProfileNames},
 	{name: "leftover_profile_config", run: patchLeftoverProfileConfig},
 	{name: "network_permissions", run: patchNetworkPermissions},
+	{name: "storage_api", run: patchStorageApi},
 }
 
 type patch struct {
@@ -141,3 +145,841 @@ func patchNetworkPermissions(name string, d *Daemon) error {
 
 	return nil
 }
+
+func patchStorageApi(name string, d *Daemon) error {
+	lvmVgName := daemonConfig["storage.lvm_vg_name"].Get()
+	zfsPoolName := daemonConfig["storage.zfs_pool_name"].Get()
+	defaultPoolName := "default"
+	preStorageApiStorageType := storageTypeDir
+
+	if lvmVgName != "" {
+		preStorageApiStorageType = storageTypeLvm
+		defaultPoolName = lvmVgName
+	} else if zfsPoolName != "" {
+		preStorageApiStorageType = storageTypeZfs
+		defaultPoolName = zfsPoolName
+	} else if d.BackingFs == "btrfs" {
+		preStorageApiStorageType = storageTypeBtrfs
+	} else {
+		// COMMENT(brauner): Dir storage pool.
+	}
+
+	defaultStorageTypeName, err := storageTypeToString(preStorageApiStorageType)
+	if err != nil {
+		return err
+	}
+
+	// COMMENT(brauner): In case we detect that an lvm name or a zfs name
+	// exists it makes sense to create a storage pool in the database,
+	// independent of whether anything currently exists on that pool. We
+	// can probably still safely assume that the user at least once used
+	// that pool.
+	// However, when we detect {dir, btrfs}, we can't rely on that guess
+	// since the daemon doesn't record any name for the pool anywhere. So
+	// in the {dir, btrfs} case we check whether anything exists on the
+	// pool; if not, we don't create a default pool, forcing the user to
+	// run lxd init again and start from a pristine state.
+	// COMMENT(brauner): Check if this LXD instance currently has any
+	// containers, snapshots, or images configured. If so, we create a
+	// default storage pool in the database. Otherwise, the user will have
+	// to run lxd init.
+	cRegular, err := dbContainersList(d.db, cTypeRegular)
+	if err != nil {
+		return err
+	}
+
+	// COMMENT(brauner): Get list of existing snapshots.
+	cSnapshots, err := dbContainersList(d.db, cTypeSnapshot)
+	if err != nil {
+		return err
+	}
+
+	// COMMENT(brauner): Get list of existing public images.
+	imgPublic, err := dbImagesGet(d.db, true)
+	if err != nil {
+		return err
+	}
+
+	// COMMENT(brauner): Get list of existing private images.
+	imgPrivate, err := dbImagesGet(d.db, false)
+	if err != nil {
+		return err
+	}
+
+	// COMMENT(brauner): Nothing exists on the pool so we're not creating a
+	// default one, thereby forcing the user to run lxd init.
+	if len(cRegular) == 0 && len(cSnapshots) == 0 && len(imgPublic) == 0 && len(imgPrivate) == 0 {
+		return nil
+	}
+
+	switch preStorageApiStorageType {
+	case storageTypeBtrfs:
+		err = upgradeFromStorageTypeBtrfs(name, d, defaultPoolName, defaultStorageTypeName, cRegular, cSnapshots, imgPublic, imgPrivate)
+	case storageTypeDir:
+		err = upgradeFromStorageTypeDir(name, d, defaultPoolName, defaultStorageTypeName, cRegular, cSnapshots, imgPublic, imgPrivate)
+	case storageTypeLvm:
+		err = upgradeFromStorageTypeLvm(name, d, defaultPoolName, defaultStorageTypeName, cRegular, cSnapshots, imgPublic, imgPrivate)
+	case storageTypeZfs:
+		err = upgradeFromStorageTypeZfs(name, d, defaultPoolName, defaultStorageTypeName, cRegular, []string{}, imgPublic, imgPrivate)
+	default: // Shouldn't happen.
+		return fmt.Errorf("Invalid storage type. Upgrading not possible.")
+	}
+	if err != nil {
+		return err
+	}
+
+	err = daemonConfig["storage.default_pool"].Set(d, defaultPoolName)
+	if err != nil {
+		return err
+	}
+
+	containers := append(cRegular, cSnapshots...)
+	for _, ct := range containers {
+		c, err := dbContainerGet(d.db, ct)
+		if err != nil {
+			shared.LogWarnf("Failed to add \"pool\" property to containers root device.")
+			continue
+		}
+
+		for k, v := range c.Devices {
+			if v["type"] == "disk" && v["path"] == "/" && v["source"] == "" {
+				c.Devices[k]["pool"] = defaultPoolName
+
+				// COMMENT(brauner): Ugh, seems like the only
+				// way to update devices currently.
+				tx, err := dbBegin(d.db)
+				if err != nil {
+					return err
+				}
+
+				err = dbContainerConfigClear(tx, c.Id)
+				if err != nil {
+					tx.Rollback()
+					return err
+				}
+
+				err = dbContainerConfigInsert(tx, c.Id, c.Config)
+				if err != nil {
+					tx.Rollback()
+					return err
+				}
+
+				err = dbContainerProfilesInsert(tx, c.Id, c.Profiles)
+				if err != nil {
+					tx.Rollback()
+					return err
+				}
+
+				err = dbDevicesAdd(tx, "container", int64(c.Id), c.Devices)
+				if err != nil {
+					tx.Rollback()
+					return err
+				}
+
+				err = dbContainerUpdate(tx, c.Id, c.Architecture, c.Ephemeral)
+				if err != nil {
+					tx.Rollback()
+					return err
+				}
+
+				if err := txCommit(tx); err != nil {
+					return err
+				}
+
+				break
+			}
+		}
+	}
+
+	return nil
+}
+
+func upgradeFromStorageTypeBtrfs(name string, d *Daemon, defaultPoolName string, defaultStorageTypeName string, cRegular []string, cSnapshots []string, imgPublic []string, imgPrivate []string) error {
+	poolConfig := map[string]string{}
+	poolSubvolumePath := getStoragePoolMountPoint(defaultPoolName)
+	poolConfig["source"] = poolSubvolumePath
+
+	// TODO(brauner): Figure out how much space we have available. Nothing
+	// easy came to my mind.
+
+	poolID, err := dbStoragePoolCreate(d.db, defaultPoolName, defaultStorageTypeName, poolConfig)
+	if err != nil {
+		return err
+	}
+
+	s, err := storagePoolInit(d, defaultPoolName)
+	if err != nil {
+		return err
+	}
+
+	err = s.StoragePoolCreate()
+	if err != nil {
+		return err
+	}
+
+	// COMMENT(brauner): Create storage volumes in the database.
+	volumeConfig := map[string]string{}
+
+	if len(cRegular) > 0 {
+		// COMMENT(brauner): Create the containers directory on the
+		// new pool: ${LXD_DIR}/storage-pools/<pool_name>/containers
+		containersSubvolumePath := getContainerMountPoint(defaultPoolName, "")
+		err := os.MkdirAll(containersSubvolumePath, 0711)
+		if err != nil {
+			return err
+		}
+	}
+
+	for _, ct := range cRegular {
+		// COMMENT(brauner): Create new db entry in the storage volumes
+		// table for the container.
+		_, err := dbStoragePoolVolumeCreate(d.db, ct, storagePoolVolumeTypeContainer, poolID, volumeConfig)
+		if err != nil {
+			shared.LogWarnf("Could not insert a storage volume for container \"%s\".", ct)
+			continue
+		}
+
+		// COMMENT(brauner): Rename the btrfs subvolume, making it a
+		// subvolume of the storage pool's subvolume:
+		// mv ${LXD_DIR}/containers/<container_name> ${LXD_DIR}/storage-pools/<pool_name>/<container_name>
+		oldContainerMntPoint := shared.VarPath("containers", ct)
+		newContainerMntPoint := getContainerMountPoint(defaultPoolName, ct)
+		err = os.Rename(oldContainerMntPoint, newContainerMntPoint)
+		if err != nil {
+			return err
+		}
+
+		// COMMENT(brauner): Create a symlink to the mountpoint of the
+		// container:
+		// ${LXD_DIR}/containers/<container_name> -> ${LXD_DIR}/storage-pools/<pool_name>/containers/<container_name>
+		doesntMatter := false
+		err = createContainerMountpoint(newContainerMntPoint, oldContainerMntPoint, doesntMatter)
+		if err != nil {
+			return err
+		}
+
+		// COMMENT(brauner): Check if we need to account for snapshots
+		// for this container.
+		ctSnapshots, err := dbContainerGetSnapshots(d.db, ct)
+		if err != nil {
+			return err
+		}
+
+		if len(ctSnapshots) > 0 {
+			// COMMENT(brauner): Create the snapshots directory in
+			// the new storage pool:
+			// ${LXD_DIR}/storage-pools/<pool_name>/snapshots
+			newSnapshotsMntPoint := getSnapshotMountPoint(defaultPoolName, ct)
+			err = os.MkdirAll(newSnapshotsMntPoint, 0700)
+			if err != nil {
+				return err
+			}
+		}
+
+		for _, cs := range ctSnapshots {
+			// COMMENT(brauner): Insert storage volumes for
+			// snapshots into the database. Note that snapshots have
+			// already been moved and symlinked above. So no need to
+			// do any work here.
+			_, err := dbStoragePoolVolumeCreate(d.db, cs, storagePoolVolumeTypeContainer, poolID, volumeConfig)
+			if err != nil {
+				shared.LogWarnf("Could not insert a storage volume for snapshot \"%s\".", cs)
+				continue
+			}
+
+			// COMMENT(brauner): We need to create a new snapshot
+			// since we can't move readonly snapshots.
+			oldSnapshotMntPoint := shared.VarPath("snapshots", cs)
+			newSnapshotMntPoint := getSnapshotMountPoint(defaultPoolName, cs)
+			err = exec.Command(
+				"btrfs",
+				"subvolume",
+				"snapshot",
+				"-r",
+				oldSnapshotMntPoint,
+				newSnapshotMntPoint).Run()
+			if err != nil {
+				return err
+			}
+
+			// COMMENT(brauner): Delete the old subvolume.
+			err = exec.Command(
+				"btrfs",
+				"subvolume",
+				"delete",
+				oldSnapshotMntPoint,
+			).Run()
+			if err != nil {
+				return err
+			}
+		}
+
+		if len(ctSnapshots) > 0 {
+			// COMMENT(brauner): Create a new symlink from the
+			// snapshots directory of the container to the snapshots
+			// directory on the storage pool:
+			// ${LXD_DIR}/snapshots/<container_name> -> ${LXD_DIR}/storage-pools/<pool_name>/snapshots/<container_name>
+			snapshotsPath := shared.VarPath("snapshots", ct)
+			newSnapshotMntPoint := getSnapshotMountPoint(defaultPoolName, ct)
+			if shared.PathExists(snapshotsPath) {
+				err := os.Remove(snapshotsPath)
+				if err != nil {
+					return err
+				}
+			}
+			err = os.Symlink(newSnapshotMntPoint, snapshotsPath)
+			if err != nil {
+				return err
+			}
+		}
+
+	}
+
+	// COMMENT(brauner): Insert storage volumes for images into the
+	// database. Images don't move. The tarballs remain in their original
+	// location.
+	images := append(imgPublic, imgPrivate...)
+	for _, img := range images {
+		_, err := dbStoragePoolVolumeCreate(d.db, img, storagePoolVolumeTypeImage, poolID, volumeConfig)
+		if err != nil {
+			shared.LogWarnf("Could not insert a storage volume for image \"%s\".", img)
+			continue
+		}
+
+		imagesMntPoint := getImageMountPoint(defaultPoolName, "")
+		err = os.MkdirAll(imagesMntPoint, 0700)
+		if err != nil {
+			return err
+		}
+
+		oldImageMntPoint := shared.VarPath("images", img+".btrfs")
+		newImageMntPoint := getImageMountPoint(defaultPoolName, img)
+		err = os.Rename(oldImageMntPoint, newImageMntPoint)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func upgradeFromStorageTypeDir(name string, d *Daemon, defaultPoolName string, defaultStorageTypeName string, cRegular []string, cSnapshots []string, imgPublic []string, imgPrivate []string) error {
+	poolConfig := map[string]string{}
+	poolConfig["source"] = shared.VarPath("storage-pools", defaultPoolName)
+
+	poolID, err := dbStoragePoolCreate(d.db, defaultPoolName, defaultStorageTypeName, poolConfig)
+	if err != nil {
+		return err
+	}
+
+	s, err := storagePoolInit(d, defaultPoolName)
+	if err != nil {
+		return err
+	}
+
+	err = s.StoragePoolCreate()
+	if err != nil {
+		return err
+	}
+
+	// COMMENT(brauner): Create storage volumes in the database.
+	volumeConfig := map[string]string{}
+	// COMMENT(brauner): Insert storage volumes for containers into the database.
+	for _, ct := range cRegular {
+		_, err := dbStoragePoolVolumeCreate(d.db, ct, storagePoolVolumeTypeContainer, poolID, volumeConfig)
+		if err != nil {
+			shared.LogWarnf("Could not insert a storage volume for container \"%s\".", ct)
+			continue
+		}
+
+		// COMMENT(brauner): Create the new path where containers will
+		// be located on the new storage api.
+		containersMntPoint := getContainerMountPoint(defaultPoolName, "")
+		err = os.MkdirAll(containersMntPoint, 0711)
+		if err != nil {
+			return err
+		}
+
+		// COMMENT(brauner): Simply rename the container, since on dir
+		// storage it is just a directory.
+		oldContainerMntPoint := shared.VarPath("containers", ct)
+		newContainerMntPoint := getContainerMountPoint(defaultPoolName, ct)
+		err = os.Rename(oldContainerMntPoint, newContainerMntPoint)
+		if err != nil {
+			return err
+		}
+
+		doesntMatter := false
+		err = createContainerMountpoint(newContainerMntPoint, oldContainerMntPoint, doesntMatter)
+		if err != nil {
+			return err
+		}
+
+		// COMMENT(brauner): Check if we need to account for snapshots
+		// for this container.
+		oldSnapshotMntPoint := shared.VarPath("snapshots", ct)
+		if !shared.PathExists(oldSnapshotMntPoint) {
+			continue
+		}
+
+		// COMMENT(brauner): If the snapshots directory for that
+		// container is empty, remove it.
+		isEmpty, err := shared.PathIsEmpty(oldSnapshotMntPoint)
+		if isEmpty {
+			os.Remove(oldSnapshotMntPoint)
+			continue
+		}
+
+		// COMMENT(brauner): Create the new path where snapshots will
+		// be located on the new storage api.
+		snapshotsMntPoint := shared.VarPath("storage-pools", defaultPoolName, "snapshots")
+		err = os.MkdirAll(snapshotsMntPoint, 0711)
+		if err != nil {
+			return err
+		}
+
+		// COMMENT(brauner): Now simply rename the snapshots directory
+		// as well.
+		newSnapshotMntPoint := getSnapshotMountPoint(defaultPoolName, ct)
+		err = os.Rename(oldSnapshotMntPoint, newSnapshotMntPoint)
+		if err != nil {
+			return err
+		}
+
+		// COMMENT(brauner): Create a symlink for this container's
+		// snapshots.
+		err = createSnapshotMountpoint(newSnapshotMntPoint, newSnapshotMntPoint, oldSnapshotMntPoint)
+		if err != nil {
+			return err
+		}
+	}
+
+	// COMMENT(brauner): Insert storage volumes for snapshots into the
+	// database. Note that snapshots have already been moved and symlinked
+	// above. So no need to do any work here.
+	for _, cs := range cSnapshots {
+		_, err := dbStoragePoolVolumeCreate(d.db, cs, storagePoolVolumeTypeContainer, poolID, volumeConfig)
+		if err != nil {
+			shared.LogWarnf("Could not insert a storage volume for snapshot \"%s\".", cs)
+			continue
+		}
+	}
+
+	// COMMENT(brauner): Insert storage volumes for images into the
+	// database. Images don't move. The tarballs remain in their original
+	// location.
+	images := append(imgPublic, imgPrivate...)
+	for _, img := range images {
+		_, err := dbStoragePoolVolumeCreate(d.db, img, storagePoolVolumeTypeImage, poolID, volumeConfig)
+		if err != nil {
+			shared.LogWarnf("Could not insert a storage volume for image \"%s\".", img)
+			continue
+		}
+	}
+
+	return nil
+}
+
+func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, defaultStorageTypeName string, cRegular []string, cSnapshots []string, imgPublic []string, imgPrivate []string) error {
+	poolConfig := map[string]string{}
+	poolConfig["source"] = defaultPoolName
+	poolConfig["volume.lvm.thinpool_name"] = daemonConfig["storage.lvm_thinpool_name"].Get()
+	poolConfig["volume.block.filesystem"] = daemonConfig["storage.lvm_fstype"].Get()
+	poolConfig["volume.block.mount_options"] = daemonConfig["storage.lvm_mount_options"].Get()
+
+	// COMMENT(brauner): Get size of the volume group.
+	output, err := tryExec("vgs", "--nosuffix", "--units", "g", "--noheadings", "-o", "size", defaultPoolName)
+	if err != nil {
+		return err
+	}
+	tmp := string(output)
+	tmp = strings.TrimSpace(tmp)
+	szFloat, err := strconv.ParseFloat(tmp, 32)
+	if err != nil {
+		return err
+	}
+	szInt64 := shared.Round(szFloat)
+	poolConfig["size"] = fmt.Sprintf("%dGB", szInt64)
+
+	err = storagePoolValidateConfig(defaultPoolName, "lvm", poolConfig)
+	if err != nil {
+		return err
+	}
+
+	poolID, err := dbStoragePoolCreate(d.db, defaultPoolName, defaultStorageTypeName, poolConfig)
+	if err != nil {
+		return err
+	}
+
+	poolMntPoint := getStoragePoolMountPoint(defaultPoolName)
+	err = os.MkdirAll(poolMntPoint, 0711)
+	if err != nil {
+		return err
+	}
+
+	// COMMENT(brauner): Create storage volumes in the database.
+	volumeConfig := map[string]string{}
+
+	if len(cRegular) > 0 {
+		newContainersMntPoint := getContainerMountPoint(defaultPoolName, "")
+		err = os.MkdirAll(newContainersMntPoint, 0711)
+		if err != nil {
+			return err
+		}
+	}
+
+	// COMMENT(brauner): Insert storage volumes for containers into the database.
+	for _, ct := range cRegular {
+		_, err := dbStoragePoolVolumeCreate(d.db, ct, storagePoolVolumeTypeContainer, poolID, volumeConfig)
+		if err != nil {
+			shared.LogWarnf("Could not insert a storage volume for container \"%s\".", ct)
+			continue
+		}
+
+		// COMMENT(brauner): Unmount the logical volume.
+		oldContainerMntPoint := shared.VarPath("containers", ct)
+		if shared.IsMountPoint(oldContainerMntPoint) {
+			err := tryUnmount(oldContainerMntPoint, 0)
+			if err != nil {
+				return err
+			}
+		}
+
+		// COMMENT(brauner): Create the new path where containers will
+		// be located on the new storage api. We do os.Rename() here to
+		// preserve permissions and ownership.
+		newContainerMntPoint := getContainerMountPoint(defaultPoolName, ct)
+		err = os.Rename(oldContainerMntPoint, newContainerMntPoint)
+		if err != nil {
+			return err
+		}
+
+		if shared.PathExists(oldContainerMntPoint + ".lv") {
+			err := os.Remove(oldContainerMntPoint + ".lv")
+			if err != nil {
+				return err
+			}
+		}
+
+		// COMMENT(brauner): Rename the logical volume device.
+		ctLvName := containerNameToLVName(ct)
+		newContainerLvName := fmt.Sprintf("%s_%s", storagePoolVolumeApiEndpointContainers, ctLvName)
+		_, err = tryExec("lvrename", defaultPoolName, ctLvName, newContainerLvName)
+		if err != nil {
+			return err
+		}
+
+		// COMMENT(brauner): Create the new container mountpoint.
+		doesntMatter := false
+		err = createContainerMountpoint(newContainerMntPoint, oldContainerMntPoint, doesntMatter)
+		if err != nil {
+			return err
+		}
+
+		lvFsType := poolConfig["volume.block.filesystem"]
+		mountOptions := poolConfig["volume.block.mount_options"]
+		containerLvDevPath := fmt.Sprintf("/dev/%s/%s_%s", defaultPoolName, storagePoolVolumeApiEndpointContainers, ctLvName)
+		err = tryMount(containerLvDevPath, newContainerMntPoint, lvFsType, 0, mountOptions)
+		if err != nil {
+			return err
+		}
+
+		// COMMENT(brauner): Check if we need to account for snapshots
+		// for this container.
+		ctSnapshots, err := dbContainerGetSnapshots(d.db, ct)
+		if err != nil {
+			return err
+		}
+
+		for _, cs := range ctSnapshots {
+			// COMMENT(brauner): Insert storage volumes for
+			// snapshots.
+			_, err := dbStoragePoolVolumeCreate(d.db, cs, storagePoolVolumeTypeContainer, poolID, volumeConfig)
+			if err != nil {
+				shared.LogWarnf("Could not insert a storage volume for snapshot \"%s\".", cs)
+				continue
+			}
+
+			// COMMENT(brauner): Create the snapshots directory in
+			// the new storage pool:
+			// ${LXD_DIR}/storage-pools/<pool_name>/snapshots
+			newSnapshotMntPoint := getSnapshotMountPoint(defaultPoolName, cs)
+			err = os.MkdirAll(newSnapshotMntPoint, 0700)
+			if err != nil {
+				return err
+			}
+
+			// COMMENT(brauner): Unmount the logical volume.
+			oldSnapshotMntPoint := shared.VarPath("snapshots", cs)
+			if shared.IsMountPoint(oldSnapshotMntPoint) {
+				err := tryUnmount(oldSnapshotMntPoint, 0)
+				if err != nil {
+					return err
+				}
+			}
+
+			// COMMENT(brauner): Rename the snapshot mountpoint to
+			// preserve ACLs and so on.
+			err = os.Rename(oldSnapshotMntPoint, newSnapshotMntPoint)
+			if err != nil {
+				return err
+			}
+
+			err = os.Remove(oldSnapshotMntPoint + ".lv")
+			if err != nil {
+				return err
+			}
+
+			// COMMENT(brauner): Make sure we use a valid lv name.
+			csLvName := containerNameToLVName(cs)
+			newSnapshotLvName := fmt.Sprintf("%s_%s", storagePoolVolumeApiEndpointContainers, csLvName)
+			_, err = tryExec("lvrename", defaultPoolName, csLvName, newSnapshotLvName)
+			if err != nil {
+				return err
+			}
+
+		}
+
+		if len(ctSnapshots) > 0 {
+			// COMMENT(brauner): Create a new symlink from the
+			// snapshots directory of the container to the snapshots
+			// directory on the storage pool:
+			// ${LXD_DIR}/snapshots/<container_name> -> ${LXD_DIR}/storage-pools/<pool_name>/snapshots/<container_name>
+			snapshotsPath := shared.VarPath("snapshots", ct)
+			newSnapshotsPath := getSnapshotMountPoint(defaultPoolName, ct)
+			if shared.PathExists(snapshotsPath) {
+				err := os.Remove(snapshotsPath)
+				if err != nil {
+					return err
+				}
+			}
+			err = os.Symlink(newSnapshotsPath, snapshotsPath)
+			if err != nil {
+				return err
+			}
+		}
+
+	}
+
+	images := append(imgPublic, imgPrivate...)
+	if len(images) > 0 {
+		imagesMntPoint := getImageMountPoint(defaultPoolName, "")
+		err := os.MkdirAll(imagesMntPoint, 0700)
+		if err != nil {
+			return err
+		}
+	}
+
+	for _, img := range images {
+		_, err := dbStoragePoolVolumeCreate(d.db, img, storagePoolVolumeTypeImage, poolID, volumeConfig)
+		if err != nil {
+			shared.LogWarnf("Could not insert a storage volume for image \"%s\".", img)
+			continue
+		}
+
+		// COMMENT(brauner): Unmount the logical volume.
+		oldImageMntPoint := shared.VarPath("images", img+".lv")
+		if shared.IsMountPoint(oldImageMntPoint) {
+			err := tryUnmount(oldImageMntPoint, 0)
+			if err != nil {
+				return err
+			}
+		}
+
+		if shared.PathExists(oldImageMntPoint) {
+			err := os.Remove(oldImageMntPoint)
+			if err != nil {
+				return err
+			}
+		}
+
+		newImageMntPoint := getImageMountPoint(defaultPoolName, img)
+		err = os.MkdirAll(newImageMntPoint, 0700)
+		if err != nil {
+			return err
+		}
+
+		// COMMENT(brauner): Rename the logical volume device.
+		newImageLvName := fmt.Sprintf("%s_%s", storagePoolVolumeApiEndpointImages, img)
+		_, err = tryExec("lvrename", defaultPoolName, img, newImageLvName)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func upgradeFromStorageTypeZfs(name string, d *Daemon, defaultPoolName string, defaultStorageTypeName string, cRegular []string, cSnapshots []string, imgPublic []string, imgPrivate []string) error {
+	poolConfig := map[string]string{}
+	oldLoopFilePath := shared.VarPath("zfs.img")
+	if shared.PathExists(oldLoopFilePath) {
+		// COMMENT(brauner): This is a loop file pool.
+		poolConfig["source"] = shared.VarPath("disks", defaultPoolName+".img")
+		err := os.Rename(oldLoopFilePath, poolConfig["source"])
+		if err != nil {
+			return err
+		}
+	} else {
+		// COMMENT(brauner): This is a block device pool.
+		poolConfig["source"] = defaultPoolName
+	}
+	output, err := exec.Command("zpool", "get", "size", "-p", "-H", defaultPoolName).CombinedOutput()
+	if err != nil {
+		return fmt.Errorf("Failed to set ZFS config: %s", output)
+	}
+	lidx := strings.LastIndex(string(output), "\t")
+	fidx := strings.LastIndex(string(output)[:lidx-1], "\t")
+	poolConfig["size"] = string(output)[fidx+1 : lidx]
+
+	poolID, err := dbStoragePoolCreate(d.db, defaultPoolName, defaultStorageTypeName, poolConfig)
+	if err != nil {
+		return err
+	}
+
+	// COMMENT(brauner): Create storage volumes in the database.
+	volumeConfig := map[string]string{}
+
+	if len(cRegular) > 0 {
+		containersSubvolumePath := getContainerMountPoint(defaultPoolName, "")
+		err := os.MkdirAll(containersSubvolumePath, 0711)
+		if err != nil {
+			return err
+		}
+	}
+
+	for _, ct := range cRegular {
+
+		// COMMENT(brauner): Insert storage volumes for containers into the database.
+		_, err := dbStoragePoolVolumeCreate(d.db, ct, storagePoolVolumeTypeContainer, poolID, volumeConfig)
+		if err != nil {
+			shared.LogWarnf("Could not insert a storage volume for container \"%s\".", ct)
+			continue
+		}
+
+		// COMMENT(brauner): Unmount the container; zfs doesn't really
+		// seem to care if we do this.
+		ctDataset := fmt.Sprintf("%s/containers/%s", defaultPoolName, ct)
+		oldContainerMntPoint := shared.VarPath("containers", ct)
+		if shared.IsMountPoint(oldContainerMntPoint) {
+			output, err := tryExec("zfs", "unmount", "-f", ctDataset)
+			if err != nil {
+				return fmt.Errorf("Failed to unmount ZFS filesystem: %s", output)
+			}
+		}
+
+		err = os.Remove(oldContainerMntPoint)
+		if err != nil {
+			return err
+		}
+
+		err = os.Remove(oldContainerMntPoint + ".zfs")
+		if err != nil {
+			return err
+		}
+
+		// COMMENT(brauner): Changing the mountpoint property should
+		// have actually created the path, but in case it somehow
+		// didn't, let's do it ourselves.
+		doesntMatter := false
+		newContainerMntPoint := getContainerMountPoint(defaultPoolName, ct)
+		err = createContainerMountpoint(newContainerMntPoint, oldContainerMntPoint, doesntMatter)
+		if err != nil {
+			return err
+		}
+
+		// COMMENT(brauner): Set a new mountpoint for the container's
+		// dataset so it will be automatically mounted.
+		output, err := exec.Command(
+			"zfs",
+			"set",
+			fmt.Sprintf("mountpoint=%s", newContainerMntPoint),
+			ctDataset).CombinedOutput()
+		if err != nil {
+			return fmt.Errorf("Failed to set new ZFS mountpoint: %s.", output)
+		}
+
+		// COMMENT(brauner): Check if we need to account for snapshots
+		// for this container.
+		ctSnapshots, err := dbContainerGetSnapshots(d.db, ct)
+		if err != nil {
+			return err
+		}
+
+		snapshotsPath := shared.VarPath("snapshots", ct)
+		for _, cs := range ctSnapshots {
+			// COMMENT(brauner): Insert storage volumes for
+			// snapshots into the database. Note that snapshots have
+			// already been moved and symlinked above. So no need to
+			// do any work here.
+			_, err := dbStoragePoolVolumeCreate(d.db, cs, storagePoolVolumeTypeContainer, poolID, volumeConfig)
+			if err != nil {
+				shared.LogWarnf("Could not insert a storage volume for snapshot \"%s\".", cs)
+				continue
+			}
+
+			// COMMENT(brauner): Create the new mountpoint for
+			// snapshots in the new storage api.
+			newSnapshotMntPoint := getSnapshotMountPoint(defaultPoolName, cs)
+			err = os.MkdirAll(newSnapshotMntPoint, 0711)
+			if err != nil {
+				return err
+			}
+		}
+
+		os.RemoveAll(snapshotsPath)
+
+		// COMMENT(brauner): Create a symlink for this container's
+		// snapshots.
+		if len(ctSnapshots) != 0 {
+			newSnapshotsMntPoint := getSnapshotMountPoint(defaultPoolName, ct)
+			err := os.Symlink(newSnapshotsMntPoint, snapshotsPath)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	// COMMENT(brauner): Insert storage volumes for images into the
+	// database. Images don't move. The tarballs remain in their original
+	// location.
+	images := append(imgPublic, imgPrivate...)
+	for _, img := range images {
+		_, err := dbStoragePoolVolumeCreate(d.db, img, storagePoolVolumeTypeImage, poolID, volumeConfig)
+		if err != nil {
+			shared.LogWarnf("Could not insert a storage volume for image \"%s\".", img)
+			continue
+		}
+
+		imageMntPoint := getImageMountPoint(defaultPoolName, img)
+		err = os.MkdirAll(imageMntPoint, 0700)
+		if err != nil {
+			return err
+		}
+
+		oldImageMntPoint := shared.VarPath("images", img+".zfs")
+		imageDataset := fmt.Sprintf("%s/images/%s", defaultPoolName, img)
+		if shared.PathExists(oldImageMntPoint) {
+			if shared.IsMountPoint(oldImageMntPoint) {
+				output, err := tryExec("zfs", "unmount", "-f", imageDataset)
+				if err != nil {
+					return fmt.Errorf("Failed to unmount ZFS filesystem: %s", output)
+				}
+			}
+
+			err = os.Remove(oldImageMntPoint)
+			if err != nil {
+				return err
+			}
+		}
+
+		// COMMENT(brauner): Set the image dataset's mountpoint to
+		// none so it is no longer automatically mounted.
+		output, err := exec.Command("zfs", "set", "mountpoint=none", imageDataset).CombinedOutput()
+		if err != nil {
+			return fmt.Errorf("Failed to set new ZFS mountpoint: %s.", output)
+		}
+	}
+
+	return nil
+}
diff --git a/shared/util.go b/shared/util.go
index 21c2b29..fbdd565 100644
--- a/shared/util.go
+++ b/shared/util.go
@@ -10,6 +10,7 @@ import (
 	"fmt"
 	"io"
 	"io/ioutil"
+	"math"
 	"net/http"
 	"os"
 	"os/exec"
@@ -786,3 +787,11 @@ func TimeIsSet(ts time.Time) bool {
 
 	return true
 }
+
+func Round(x float64) int64 {
+	if x < 0 {
+		return int64(math.Ceil(x - 0.5))
+	}
+
+	return int64(math.Floor(x + 0.5))
+}

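For reference, a self-contained sketch of the two size probes used by the
upgrade helpers above (pool names are placeholders):

package main

import (
	"fmt"
	"os/exec"
	"strconv"
	"strings"

	"github.com/lxc/lxd/shared"
)

// vgSizeGB mirrors the LVM path: "vgs --units g" prints a bare float such
// as "21.46", which shared.Round (added in this patch) turns into a whole
// number of gigabytes.
func vgSizeGB(vgName string) (string, error) {
	output, err := exec.Command("vgs", "--nosuffix", "--units", "g",
		"--noheadings", "-o", "size", vgName).Output()
	if err != nil {
		return "", err
	}

	szFloat, err := strconv.ParseFloat(strings.TrimSpace(string(output)), 32)
	if err != nil {
		return "", err
	}

	return fmt.Sprintf("%dGB", shared.Round(szFloat)), nil
}

// zpoolSizeBytes mirrors the ZFS path: "zpool get size -p -H" prints
// tab-separated columns ("default\tsize\t42949672960\t-"), and the two
// LastIndex calls slice out the VALUE column.
func zpoolSizeBytes(poolName string) (string, error) {
	output, err := exec.Command("zpool", "get", "size", "-p", "-H", poolName).CombinedOutput()
	if err != nil {
		return "", err
	}

	s := string(output)
	lidx := strings.LastIndex(s, "\t")
	fidx := strings.LastIndex(s[:lidx-1], "\t")
	return s[fidx+1 : lidx], nil
}

func main() {
	if size, err := vgSizeGB("pool0"); err == nil {
		fmt.Println(size) // e.g. "21GB"
	}

	if size, err := zpoolSizeBytes("default"); err == nil {
		fmt.Println(size) // e.g. "42949672960"
	}
}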
From a5cc631b2c0b7dbfd35b1b9be4815afec268b226 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Wed, 11 Jan 2017 15:32:04 +0100
Subject: [PATCH 54/63] daemon_config: deprecate storage keys

Global storage keys are marked deprecated in new LXD instances. Upgrading from
an LXD instance that has these keys set will work. The only thing that changes
is that setting these keys is no longer possible.

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/daemon_config.go | 47 ++++++++++++++++-------------------------------
 lxd/storage_lvm.go   | 20 --------------------
 2 files changed, 16 insertions(+), 51 deletions(-)

diff --git a/lxd/daemon_config.go b/lxd/daemon_config.go
index adb72ea..00e28f3 100644
--- a/lxd/daemon_config.go
+++ b/lxd/daemon_config.go
@@ -189,15 +189,18 @@ func daemonConfigInit(db *sql.DB) error {
 		"images.remote_cache_expiry":   {valueType: "int", defaultValue: "10", trigger: daemonConfigTriggerExpiry},
 		"images.default_storage_pool":  {valueType: "string", validator: daemonConfigValidateDefaultPool},
 
-		"storage.lvm_fstype":           {valueType: "string", defaultValue: "ext4", validValues: []string{"ext4", "xfs"}},
-		"storage.lvm_mount_options":    {valueType: "string", defaultValue: "discard"},
-		"storage.lvm_thinpool_name":    {valueType: "string", defaultValue: "LXDPool", validator: storageLVMValidateThinPoolName},
-		"storage.lvm_vg_name":          {valueType: "string", validator: storageLVMValidateVolumeGroupName, setter: daemonConfigSetStorage},
-		"storage.lvm_volume_size":      {valueType: "string", defaultValue: "10GiB"},
-		"storage.zfs_pool_name":        {valueType: "string", validator: storageZFSValidatePoolName, setter: daemonConfigSetStorage},
-		"storage.zfs_remove_snapshots": {valueType: "bool"},
-		"storage.zfs_use_refquota":     {valueType: "bool"},
-		"storage.default_pool":         {valueType: "string", validator: daemonConfigValidateDefaultPool},
+		"storage.default_pool": {valueType: "string", validator: daemonConfigValidateDefaultPool},
+
+		// COMMENT(brauner): Keys deprecated since the implementation of
+		// the storage api.
+		"storage.lvm_fstype":           {valueType: "string", defaultValue: "ext4", validValues: []string{"ext4", "xfs"}, validator: storageDeprecatedKeys},
+		"storage.lvm_mount_options":    {valueType: "string", defaultValue: "discard", validator: storageDeprecatedKeys},
+		"storage.lvm_thinpool_name":    {valueType: "string", defaultValue: "LXDPool", validator: storageDeprecatedKeys},
+		"storage.lvm_vg_name":          {valueType: "string", validator: storageDeprecatedKeys},
+		"storage.lvm_volume_size":      {valueType: "string", defaultValue: "10GiB", validator: storageDeprecatedKeys},
+		"storage.zfs_pool_name":        {valueType: "string", validator: storageDeprecatedKeys},
+		"storage.zfs_remove_snapshots": {valueType: "bool", validator: storageDeprecatedKeys},
+		"storage.zfs_use_refquota":     {valueType: "bool", validator: storageDeprecatedKeys},
 	}
 
 	// Load the values from the DB
@@ -262,28 +265,6 @@ func daemonConfigSetPassword(d *Daemon, key string, value string) (string, error
 	return value, nil
 }
 
-func daemonConfigSetStorage(d *Daemon, key string, value string) (string, error) {
-	// The storage driver looks at daemonConfig so just set it temporarily
-	daemonConfigLock.Lock()
-	oldValue := daemonConfig[key].Get()
-	daemonConfig[key].currentValue = value
-	daemonConfigLock.Unlock()
-
-	defer func() {
-		daemonConfigLock.Lock()
-		daemonConfig[key].currentValue = oldValue
-		daemonConfigLock.Unlock()
-	}()
-
-	// Update the current storage driver
-	err := d.SetupStorageDriver()
-	if err != nil {
-		return "", err
-	}
-
-	return value, nil
-}
-
 func daemonConfigSetAddress(d *Daemon, key string, value string) (string, error) {
 	// Update the current https address
 	err := d.UpdateHTTPsPort(value)
@@ -347,3 +328,7 @@ func daemonConfigValidateDefaultPool(d *Daemon, key string, value string) error
 
 	return nil
 }
+
+func storageDeprecatedKeys(d *Daemon, key string, value string) error {
+	return fmt.Errorf("Setting the key \"%s\" is deprecated on post storage-api LXD instances.", key)
+}
diff --git a/lxd/storage_lvm.go b/lxd/storage_lvm.go
index 09bcbb7..e9a05f2 100644
--- a/lxd/storage_lvm.go
+++ b/lxd/storage_lvm.go
@@ -116,26 +116,6 @@ func storageLVMValidateThinPoolName(d *Daemon, vgName string, value string) erro
 	return nil
 }
 
-func storageLVMValidateVolumeGroupName(d *Daemon, key string, value string) error {
-	users, err := storageLVMGetThinPoolUsers(d)
-	if err != nil {
-		return fmt.Errorf("Error checking if a pool is already in use: %v", err)
-	}
-
-	if len(users) > 0 {
-		return fmt.Errorf("Can not change LVM config. Images or containers are still using LVs: %v", users)
-	}
-
-	if value != "" {
-		err = storageLVMCheckVolumeGroup(value)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
 func xfsGenerateNewUUID(lvpath string) error {
 	output, err := exec.Command(
 		"xfs_admin",

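With the validator attached, any write to one of the deprecated keys now
fails at validation time. Inside the lxd package this amounts to (sketch;
"myvg" is a placeholder):

	err := daemonConfig["storage.lvm_vg_name"].Set(d, "myvg")
	// err: Setting the key "storage.lvm_vg_name" is deprecated on post
	// storage-api LXD instances.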
From 0774dc389271c11cc8976cef3ba4586e8aa8129c Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Mon, 16 Jan 2017 23:48:38 +0100
Subject: [PATCH 55/63] lxc/list: show storage pool of the container

This adds "b" as column option to show the storage pool the container's storage
volume belongs to.

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxc/list.go | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/lxc/list.go b/lxc/list.go
index e492007..470e19c 100644
--- a/lxc/list.go
+++ b/lxc/list.go
@@ -433,6 +433,7 @@ func (c *listCmd) parseColumns() ([]column, error) {
 		'S': {i18n.G("SNAPSHOTS"), c.numberSnapshotsColumnData, false, true},
 		's': {i18n.G("STATE"), c.statusColumnData, false, false},
 		't': {i18n.G("TYPE"), c.typeColumnData, false, false},
+		'b': {i18n.G("STORAGE POOL"), c.StoragePoolColumnData, false, false},
 	}
 
 	if c.fast {
@@ -597,6 +598,10 @@ func (c *listCmd) ArchitectureColumnData(cInfo api.Container, cState *api.Contai
 	return cInfo.Architecture
 }
 
+func (c *listCmd) StoragePoolColumnData(cInfo api.Container, cState *api.ContainerState, cSnaps []api.ContainerSnapshot) string {
+	return cInfo.StoragePool
+}
+
 func (c *listCmd) ProfilesColumnData(cInfo api.Container, cState *api.ContainerState, cSnaps []api.ContainerSnapshot) string {
 	return strings.Join(cInfo.Profiles, "\n")
 }

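Usage note: assuming the usual -c/--columns flag of "lxc list", something
like "lxc list -c nsb" would then print NAME, STATE and STORAGE POOL columns.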
From 1ab657243220e1f833e153ae04f496baabe839b7 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Thu, 5 Jan 2017 13:09:52 +0100
Subject: [PATCH 56/63] lxc/storage: add new storage api client tool

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxc/main.go    |   1 +
 lxc/storage.go | 861 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 862 insertions(+)
 create mode 100644 lxc/storage.go

diff --git a/lxc/main.go b/lxc/main.go
index 8dd8956..0ac0d9d 100644
--- a/lxc/main.go
+++ b/lxc/main.go
@@ -204,6 +204,7 @@ var commands = map[string]command{
 		name:       "stop",
 		timeout:    -1,
 	},
+	"storage": &storageCmd{},
 	"version": &versionCmd{},
 }
 
diff --git a/lxc/storage.go b/lxc/storage.go
new file mode 100644
index 0000000..4824cea
--- /dev/null
+++ b/lxc/storage.go
@@ -0,0 +1,861 @@
+package main
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"sort"
+	"strconv"
+	"strings"
+	"syscall"
+
+	"github.com/olekukonko/tablewriter"
+	"gopkg.in/yaml.v2"
+
+	"github.com/lxc/lxd"
+	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/api"
+	"github.com/lxc/lxd/shared/i18n"
+	"github.com/lxc/lxd/shared/termios"
+)
+
+// This command is only allowed to create custom storage volumes.
+const defaultStoragePoolVolumeType = "custom"
+
+type storageCmd struct {
+}
+
+func (c *storageCmd) showByDefault() bool {
+	return true
+}
+
+func (c *storageCmd) storagePoolEditHelp() string {
+	return i18n.G(
+		`### This is a yaml representation of a storage pool.
+### Any line starting with a '#' will be ignored.
+###
+### A storage pool consists of a set of configuration items.
+###
+### An example would look like:
+### pool_name: default
+### driver: zfs
+### used_by: []
+### config:
+###   size: "61203283968"
+###   source: /home/chb/mnt/lxd_test/default.img
+###   zfs.pool_name: default`)
+}
+
+func (c *storageCmd) storagePoolVolumeEditHelp() string {
+	return i18n.G(
+		`### This is a yaml representation of a storage volume.
+### Any line starting with a '#' will be ignored.
+###
+### A storage volume consists of a set of configuration items.
+###
+### volume_name: vol1
+### volume_type: custom
+### used_by: []
+### config:
+###   size: "61203283968"`)
+}
+
+func (c *storageCmd) usage() string {
+	return i18n.G(
+		`Manage storage.
+
+lxc storage list [<remote>:]                           List available storage pools.
+lxc storage show [<remote>:]<pool>                     Show details of a storage pool.
+lxc storage create [<remote>:]<pool> [key=value]...    Create a storage pool.
+lxc storage get [<remote>:]<pool> <key>                Get storage pool configuration.
+lxc storage set [<remote>:]<pool> <key> <value>        Set storage pool configuration.
+lxc storage unset [<remote>:]<pool> <key>              Unset storage pool configuration.
+lxc storage delete [<remote>:]<pool>                   Delete a storage pool.
+lxc storage edit [<remote>:]<pool>
+    Edit storage pool, either by launching external editor or reading STDIN.
+    Example: lxc storage edit [<remote>:]<pool> # launch editor
+             cat pool.yaml | lxc storage edit [<remote>:]<pool> # read from pool.yaml
+
+lxc storage attach [<remote>:]<pool> <container> [device name]
+lxc storage attach-profile [<remote>:]<pool> <profile> [device name]
+
+lxc storage detach <pool> <container> [device name]
+lxc storage detach-profile <pool> <container> [device name]
+
+lxc storage volume list [<remote>:]<pool>                              List available storage volumes on a storage pool.
+lxc storage volume show [<remote>:]<pool> <volume>                     Show details of a storage volume on a storage pool.
+lxc storage volume create [<remote>:]<pool> <volume> [key=value]...    Create a storage volume on a storage pool.
+lxc storage volume get [<remote>:]<pool> <volume> <key>                Get storage volume configuration on a storage pool.
+lxc storage volume set [<remote>:]<pool> <volume> <key> <value>        Set storage volume configuration on a storage pool.
+lxc storage volume unset [<remote>:]<pool> <volume> <key>              Unset storage volume configuration on a storage pool.
+lxc storage volume delete [<remote>:]<pool> <volume>                   Delete a storage volume on a storage pool.
+lxc storage volume edit [<remote>:]<pool> <volume>
+    Edit storage volume, either by launching external editor or reading STDIN.
+    Example: lxc storage volume edit [<remote>:]<pool> <volume> # launch editor
+             cat volume.yaml | lxc storage volume edit [<remote>:]<pool> <volume> # read from volume.yaml
+
+lxc storage volume attach [<remote>:]<pool> <volume> <container> [device name] <path>
+lxc storage volume attach-profile [<remote>:]<pool> <volume> <profile> [device name] <path>
+
+lxc storage volume detach [<remote>:]<pool> <volume> <container> [device name]
+lxc storage volume detach-profile [<remote>:]<pool> <volume> <profile> [device name]
+`)
+}
+
+func (c *storageCmd) flags() {}
+
+func (c *storageCmd) run(config *lxd.Config, args []string) error {
+	if len(args) < 1 {
+		return errArgs
+	}
+
+	if args[0] == "list" {
+		return c.doStoragePoolsList(config, args)
+	}
+
+	if len(args) < 2 {
+		return errArgs
+	}
+
+	remote, sub := config.ParseRemoteAndContainer(args[1])
+	client, err := lxd.NewClient(config, remote)
+	if err != nil {
+		return err
+	}
+
+	if args[0] == "volume" {
+		switch args[1] {
+		case "attach":
+			if len(args) < 5 {
+				return errArgs
+			}
+			pool := args[2]
+			volume := args[3]
+			return c.doStoragePoolVolumeAttach(client, pool, volume, args[4:])
+		case "attach-profile":
+			if len(args) < 5 {
+				return errArgs
+			}
+			pool := args[2]
+			volume := args[3]
+			return c.doStoragePoolVolumeAttachProfile(client, pool, volume, args[4:])
+		case "create":
+			if len(args) < 4 {
+				return errArgs
+			}
+			pool := args[2]
+			volume := args[3]
+			return c.doStoragePoolVolumeCreate(client, pool, volume, args[4:])
+		case "delete":
+			if len(args) != 4 {
+				return errArgs
+			}
+			pool := args[2]
+			volume := args[3]
+			return c.doStoragePoolVolumeDelete(client, pool, volume)
+		case "detach":
+			if len(args) < 4 {
+				return errArgs
+			}
+			pool := args[2]
+			volume := args[3]
+			return c.doStoragePoolVolumeDetach(client, pool, volume, args[4:])
+		case "detach-profile":
+			if len(args) < 5 {
+				return errArgs
+			}
+			pool := args[2]
+			volume := args[3]
+			return c.doStoragePoolVolumeDetachProfile(client, pool, volume, args[4:])
+		case "edit":
+			if len(args) != 4 {
+				return errArgs
+			}
+			pool := args[2]
+			volume := args[3]
+			return c.doStoragePoolVolumeEdit(client, pool, volume)
+		case "get":
+			if len(args) < 4 {
+				return errArgs
+			}
+			pool := args[2]
+			volume := args[3]
+			return c.doStoragePoolVolumeGet(client, pool, volume, args[3:])
+		case "list":
+			if len(args) != 3 {
+				return errArgs
+			}
+			pool := args[2]
+			return c.doStoragePoolVolumesList(config, remote, pool, args)
+		case "set":
+			if len(args) < 4 {
+				return errArgs
+			}
+			pool := args[2]
+			volume := args[3]
+			return c.doStoragePoolVolumeSet(client, pool, volume, args[3:])
+		case "unset":
+			if len(args) < 4 {
+				return errArgs
+			}
+			pool := args[2]
+			volume := args[3]
+			return c.doStoragePoolVolumeSet(client, pool, volume, args[3:])
+		case "show":
+			if len(args) != 4 {
+				return errArgs
+			}
+			pool := args[2]
+			volume := args[3]
+			return c.doStoragePoolVolumeShow(client, pool, volume)
+		default:
+			return errArgs
+		}
+	} else {
+		pool := sub
+		switch args[0] {
+		case "create":
+			if len(args) < 3 {
+				return errArgs
+			}
+			driver := strings.Join(args[2:3], "")
+			return c.doStoragePoolCreate(client, pool, driver, args[3:])
+		case "delete":
+			return c.doStoragePoolDelete(client, pool)
+		case "edit":
+			return c.doStoragePoolEdit(client, pool)
+		case "get":
+			if len(args) < 2 {
+				return errArgs
+			}
+			return c.doStoragePoolGet(client, pool, args[2:])
+		case "list":
+			if len(args) != 4 {
+				return errArgs
+			}
+			pool := args[2]
+			volumeType := args[3]
+			return c.doStoragePoolVolumesTypeList(config, remote, pool, volumeType, args)
+		case "set":
+			if len(args) < 2 {
+				return errArgs
+			}
+			return c.doStoragePoolSet(client, pool, args[2:])
+		case "unset":
+			if len(args) < 2 {
+				return errArgs
+			}
+			return c.doStoragePoolSet(client, pool, args[2:])
+		case "show":
+			if len(args) < 2 {
+				return errArgs
+			}
+			return c.doStoragePoolShow(client, pool)
+		default:
+			return errArgs
+		}
+	}
+}
+
+func (c *storageCmd) doStoragePoolVolumeAttach(client *lxd.Client, pool string, volume string, args []string) error {
+	if len(args) < 2 || len(args) > 3 {
+		return errArgs
+	}
+
+	container := args[0]
+	devPath := ""
+	devName := ""
+	if len(args) == 2 {
+		// COMMENT(brauner): Only the path has been given to us.
+		devPath = args[1]
+		devName = volume
+	} else if len(args) == 3 {
+		// COMMENT(brauner): Path and device name have been given to us.
+		devName = args[1]
+		devPath = args[2]
+	}
+
+	// COMMENT(brauner): Check if the requested storage volume actually
+	// exists on the requested storage pool.
+	vol, err := client.StoragePoolVolumeTypeGet(pool, volume, defaultStoragePoolVolumeType)
+	if err != nil {
+		return err
+	}
+
+	props := []string{fmt.Sprintf("pool=%s", pool), fmt.Sprintf("path=%s", devPath), fmt.Sprintf("source=%s", vol.VolumeName)}
+	resp, err := client.ContainerDeviceAdd(container, devName, "disk", props)
+	if err != nil {
+		return err
+	}
+
+	return client.WaitForSuccess(resp.Operation)
+}
+
+func (c *storageCmd) doStoragePoolVolumeDetach(client *lxd.Client, pool string, volume string, args []string) error {
+	if len(args) < 1 || len(args) > 2 {
+		return errArgs
+	}
+
+	containerName := args[0]
+	devName := ""
+	if len(args) == 2 {
+		devName = args[1]
+	}
+
+	container, err := client.ContainerInfo(containerName)
+	if err != nil {
+		return err
+	}
+
+	if devName == "" {
+		for n, d := range container.Devices {
+			if d["type"] == "disk" && d["pool"] == pool && d["source"] == volume {
+				if devName != "" {
+					return fmt.Errorf(i18n.G("More than one device matches, specify the device name."))
+				}
+
+				devName = n
+			}
+		}
+	}
+
+	if devName == "" {
+		return fmt.Errorf(i18n.G("No device found for this storage volume."))
+	}
+
+	_, ok := container.Devices[devName]
+	if !ok {
+		return fmt.Errorf(i18n.G("The specified device doesn't exist"))
+	}
+
+	resp, err := client.ContainerDeviceDelete(containerName, devName)
+	if err != nil {
+		return err
+	}
+
+	return client.WaitForSuccess(resp.Operation)
+}
+
+func (c *storageCmd) doStoragePoolVolumeAttachProfile(client *lxd.Client, pool string, volume string, args []string) error {
+	if len(args) < 2 || len(args) > 3 {
+		return errArgs
+	}
+
+	profile := args[0]
+	devPath := ""
+	devName := ""
+	if len(args) == 2 {
+		// COMMENT(brauner): Only the path has been given to us.
+		devPath = args[1]
+		devName = volume
+	} else if len(args) == 3 {
+		// COMMENT(brauner): Path and device name have been given to us.
+		devName = args[1]
+		devPath = args[2]
+	}
+
+	// COMMENT(brauner): Check if the requested storage volume actually
+	// exists on the requested storage pool.
+	vol, err := client.StoragePoolVolumeTypeGet(pool, volume, defaultStoragePoolVolumeType)
+	if err != nil {
+		return err
+	}
+
+	props := []string{fmt.Sprintf("pool=%s", pool), fmt.Sprintf("path=%s", devPath), fmt.Sprintf("source=%s", vol.VolumeName)}
+
+	_, err = client.ProfileDeviceAdd(profile, devName, "disk", props)
+	return err
+}
+
+func (c *storageCmd) doStoragePoolCreate(client *lxd.Client, name string, driver string, args []string) error {
+	config := map[string]string{}
+
+	for _, arg := range args {
+		entry := strings.SplitN(arg, "=", 2)
+		if len(entry) < 2 {
+			return errArgs
+		}
+		config[entry[0]] = entry[1]
+	}
+
+	err := client.StoragePoolCreate(name, driver, config)
+	if err == nil {
+		fmt.Printf(i18n.G("Storage pool %s created")+"\n", name)
+	}
+
+	return err
+}
+
+func (c *storageCmd) doStoragePoolVolumeDetachProfile(client *lxd.Client, pool string, volume string, args []string) error {
+	if len(args) < 1 || len(args) > 2 {
+		return errArgs
+	}
+
+	profileName := args[0]
+	devName := ""
+	if len(args) > 1 {
+		devName = args[1]
+	}
+
+	profile, err := client.ProfileConfig(profileName)
+	if err != nil {
+		return err
+	}
+
+	if devName == "" {
+		for n, d := range profile.Devices {
+			if d["type"] == "disk" && d["pool"] == pool && d["source"] == volume {
+				if devName != "" {
+					return fmt.Errorf(i18n.G("More than one device matches, specify the device name."))
+				}
+
+				devName = n
+			}
+		}
+	}
+
+	if devName == "" {
+		return fmt.Errorf(i18n.G("No device found for this storage volume."))
+	}
+
+	_, ok := profile.Devices[devName]
+	if !ok {
+		return fmt.Errorf(i18n.G("The specified device doesn't exist"))
+	}
+
+	_, err = client.ProfileDeviceDelete(profileName, devName)
+	return err
+}
+
+func (c *storageCmd) doStoragePoolDelete(client *lxd.Client, name string) error {
+	err := client.StoragePoolDelete(name)
+	if err == nil {
+		fmt.Printf(i18n.G("Storage pool %s deleted")+"\n", name)
+	}
+
+	return err
+}
+
+func (c *storageCmd) doStoragePoolEdit(client *lxd.Client, name string) error {
+	// If stdin isn't a terminal, read text from it
+	if !termios.IsTerminal(int(syscall.Stdin)) {
+		contents, err := ioutil.ReadAll(os.Stdin)
+		if err != nil {
+			return err
+		}
+
+		newdata := api.StoragePool{}
+		err = yaml.Unmarshal(contents, &newdata)
+		if err != nil {
+			return err
+		}
+		return client.StoragePoolPut(name, newdata)
+	}
+
+	// Extract the current value
+	pool, err := client.StoragePoolGet(name)
+	if err != nil {
+		return err
+	}
+
+	data, err := yaml.Marshal(&pool)
+	if err != nil {
+		return err
+	}
+
+	// Spawn the editor
+	content, err := shared.TextEditor("", []byte(c.storagePoolEditHelp()+"\n\n"+string(data)))
+	if err != nil {
+		return err
+	}
+
+	for {
+		// Parse the text received from the editor
+		newdata := api.StoragePool{}
+		err = yaml.Unmarshal(content, &newdata)
+		if err == nil {
+			err = client.StoragePoolPut(name, newdata)
+		}
+
+		// Respawn the editor
+		if err != nil {
+			fmt.Fprintf(os.Stderr, i18n.G("Config parsing error: %s")+"\n", err)
+			fmt.Println(i18n.G("Press enter to open the editor again"))
+
+			_, err := os.Stdin.Read(make([]byte, 1))
+			if err != nil {
+				return err
+			}
+
+			content, err = shared.TextEditor("", content)
+			if err != nil {
+				return err
+			}
+			continue
+		}
+		break
+	}
+	return nil
+}
+
+func (c *storageCmd) doStoragePoolGet(client *lxd.Client, name string, args []string) error {
+	// we shifted @args so it should read "<key>"
+	if len(args) != 1 {
+		return errArgs
+	}
+
+	resp, err := client.StoragePoolGet(name)
+	if err != nil {
+		return err
+	}
+
+	for k, v := range resp.PoolConfig {
+		if k == args[0] {
+			fmt.Printf("%s\n", v)
+		}
+	}
+	return nil
+}
+
+func (c *storageCmd) doStoragePoolsList(config *lxd.Config, args []string) error {
+	var remote string
+	if len(args) > 1 {
+		var name string
+		remote, name = config.ParseRemoteAndContainer(args[1])
+		if name != "" {
+			return fmt.Errorf(i18n.G("Cannot provide container name to list"))
+		}
+	} else {
+		remote = config.DefaultRemote
+	}
+
+	client, err := lxd.NewClient(config, remote)
+	if err != nil {
+		return err
+	}
+
+	pools, err := client.ListStoragePools()
+	if err != nil {
+		return err
+	}
+
+	data := [][]string{}
+	for _, pool := range pools {
+		size := pool.PoolConfig["size"]
+		sz, err := strconv.ParseUint(pool.PoolConfig["size"], 10, 64)
+		if err == nil {
+			size = shared.GetByteSizeString(int64(sz), 0)
+		}
+
+		usedby := strconv.Itoa(len(pool.PoolUsedBy))
+
+		data = append(data, []string{pool.PoolName, pool.PoolDriver, size, pool.PoolConfig["source"], usedby})
+	}
+
+	table := tablewriter.NewWriter(os.Stdout)
+	table.SetAutoWrapText(false)
+	table.SetAlignment(tablewriter.ALIGN_LEFT)
+	table.SetRowLine(true)
+	table.SetHeader([]string{
+		i18n.G("NAME"),
+		i18n.G("DRIVER"),
+		i18n.G("SIZE"),
+		i18n.G("SOURCE"),
+		i18n.G("USED BY")})
+	sort.Sort(byName(data))
+	table.AppendBulk(data)
+	table.Render()
+
+	return nil
+}
+
+func (c *storageCmd) doStoragePoolSet(client *lxd.Client, name string, args []string) error {
+	// we shifted @args so it should read "<key> [<value>]"
+	if len(args) < 1 {
+		return errArgs
+	}
+
+	pool, err := client.StoragePoolGet(name)
+	if err != nil {
+		return err
+	}
+
+	key := args[0]
+	var value string
+	if len(args) < 2 {
+		value = ""
+	} else {
+		value = args[1]
+	}
+
+	if !termios.IsTerminal(int(syscall.Stdin)) && value == "-" {
+		buf, err := ioutil.ReadAll(os.Stdin)
+		if err != nil {
+			return fmt.Errorf("Can't read from stdin: %s", err)
+		}
+		value = string(buf)
+	}
+
+	pool.PoolConfig[key] = value
+
+	return client.StoragePoolPut(name, pool)
+}
+
+func (c *storageCmd) doStoragePoolShow(client *lxd.Client, name string) error {
+	pool, err := client.StoragePoolGet(name)
+	if err != nil {
+		return err
+	}
+
+	sz, err := strconv.ParseUint(pool.PoolConfig["size"], 10, 64)
+	if err == nil {
+		pool.PoolConfig["size"] = shared.GetByteSizeString(int64(sz), 0)
+	}
+
+	sort.Strings(pool.PoolUsedBy)
+
+	data, err := yaml.Marshal(&pool)
+	if err != nil {
+		return err
+	}
+	fmt.Printf("%s", data)
+
+	return nil
+}
+
+func (c *storageCmd) doStoragePoolVolumesList(config *lxd.Config, remote string, pool string, args []string) error {
+	client, err := lxd.NewClient(config, remote)
+	if err != nil {
+		return err
+	}
+
+	volumes, err := client.StoragePoolVolumesList(pool)
+	if err != nil {
+		return err
+	}
+
+	data := [][]string{}
+	for _, volume := range volumes {
+		usedby := strconv.Itoa(len(volume.VolumeUsedBy))
+
+		shortName := volume.VolumeName
+		if volume.VolumeType == "image" {
+			shortName = volume.VolumeName[0:12]
+		}
+
+		data = append(data, []string{shortName, volume.VolumeType, usedby})
+	}
+
+	table := tablewriter.NewWriter(os.Stdout)
+	table.SetAutoWrapText(false)
+	table.SetAlignment(tablewriter.ALIGN_LEFT)
+	table.SetRowLine(true)
+	table.SetHeader([]string{
+		i18n.G("NAME"),
+		i18n.G("TYPE"),
+		i18n.G("USED BY")})
+	sort.Sort(byName(data))
+	table.AppendBulk(data)
+	table.Render()
+
+	return nil
+}
+
+func (c *storageCmd) doStoragePoolVolumesTypeList(config *lxd.Config, remote string, pool string, volumeType string, args []string) error {
+	client, err := lxd.NewClient(config, remote)
+	if err != nil {
+		return err
+	}
+
+	volumes, err := client.StoragePoolVolumesTypeList(pool, volumeType)
+	if err != nil {
+		return err
+	}
+
+	data := [][]string{}
+	for _, volume := range volumes {
+		size := volume.VolumeConfig["size"]
+		sz, err := strconv.ParseUint(volume.VolumeConfig["size"], 10, 64)
+		if err == nil {
+			size = shared.GetByteSizeString(int64(sz), 0)
+		}
+
+		usedby := strconv.Itoa(len(volume.VolumeUsedBy))
+
+		data = append(data, []string{volume.VolumeName, volume.VolumeType, size, usedby})
+	}
+
+	table := tablewriter.NewWriter(os.Stdout)
+	table.SetAutoWrapText(false)
+	table.SetAlignment(tablewriter.ALIGN_LEFT)
+	table.SetRowLine(true)
+	table.SetHeader([]string{
+		i18n.G("NAME"),
+		i18n.G("TYPE"),
+		i18n.G("SIZE"),
+		i18n.G("USED BY")})
+	sort.Sort(byName(data))
+	table.AppendBulk(data)
+	table.Render()
+
+	return nil
+}
+
+func (c *storageCmd) doStoragePoolVolumeCreate(client *lxd.Client, pool string, volume string, args []string) error {
+	config := map[string]string{}
+
+	for _, arg := range args {
+		entry := strings.SplitN(arg, "=", 2)
+		if len(entry) < 2 {
+			return errArgs
+		}
+		config[entry[0]] = entry[1]
+	}
+
+	err := client.StoragePoolVolumeTypeCreate(pool, volume, defaultStoragePoolVolumeType, config)
+	if err == nil {
+		fmt.Printf(i18n.G("Storage volume %s created")+"\n", volume)
+	}
+
+	return err
+}
+
+func (c *storageCmd) doStoragePoolVolumeDelete(client *lxd.Client, pool string, volume string) error {
+	err := client.StoragePoolVolumeTypeDelete(pool, volume, defaultStoragePoolVolumeType)
+	if err == nil {
+		fmt.Printf(i18n.G("Storage volume %s deleted")+"\n", volume)
+	}
+
+	return err
+}
+
+func (c *storageCmd) doStoragePoolVolumeGet(client *lxd.Client, pool string, volume string, args []string) error {
+	// we shifted @args so it should read "<volume> <key>"
+	if len(args) != 2 {
+		return errArgs
+	}
+
+	resp, err := client.StoragePoolVolumeTypeGet(pool, volume, defaultStoragePoolVolumeType)
+	if err != nil {
+		return err
+	}
+
+	for k, v := range resp.VolumeConfig {
+		if k == args[1] {
+			fmt.Printf("%s\n", v)
+		}
+	}
+	return nil
+}
+
+func (c *storageCmd) doStoragePoolVolumeSet(client *lxd.Client, pool string, volume string, args []string) error {
+	// we shifted @args so it should read "<volume> <key> [<value>]"
+	if len(args) < 2 {
+		return errArgs
+	}
+
+	volumeConfig, err := client.StoragePoolVolumeTypeGet(pool, volume, defaultStoragePoolVolumeType)
+	if err != nil {
+		return err
+	}
+
+	key := args[1]
+	var value string
+	if len(args) < 3 {
+		value = ""
+	} else {
+		value = args[2]
+	}
+
+	if !termios.IsTerminal(int(syscall.Stdin)) && value == "-" {
+		buf, err := ioutil.ReadAll(os.Stdin)
+		if err != nil {
+			return fmt.Errorf("Can't read from stdin: %s", err)
+		}
+		value = string(buf)
+	}
+
+	volumeConfig.VolumeConfig[key] = value
+
+	return client.StoragePoolVolumeTypePut(pool, volume, defaultStoragePoolVolumeType, volumeConfig)
+}
+
+func (c *storageCmd) doStoragePoolVolumeShow(client *lxd.Client, pool string, volume string) error {
+	volumeStruct, err := client.StoragePoolVolumeTypeGet(pool, volume, defaultStoragePoolVolumeType)
+	if err != nil {
+		return err
+	}
+
+	sz, err := strconv.ParseUint(volumeStruct.VolumeConfig["size"], 10, 64)
+	if err == nil {
+		volumeStruct.VolumeConfig["size"] = shared.GetByteSizeString(int64(sz), 0)
+	}
+
+	sort.Strings(volumeStruct.VolumeUsedBy)
+
+	data, err := yaml.Marshal(&volumeStruct)
+	if err != nil {
+		return err
+	}
+	fmt.Printf("%s", data)
+
+	return nil
+}
+
+func (c *storageCmd) doStoragePoolVolumeEdit(client *lxd.Client, pool string, volume string) error {
+	// If stdin isn't a terminal, read text from it
+	if !termios.IsTerminal(int(syscall.Stdin)) {
+		contents, err := ioutil.ReadAll(os.Stdin)
+		if err != nil {
+			return err
+		}
+
+		newdata := api.StorageVolume{}
+		err = yaml.Unmarshal(contents, &newdata)
+		if err != nil {
+			return err
+		}
+		return client.StoragePoolVolumeTypePut(pool, volume, defaultStoragePoolVolumeType, newdata)
+	}
+
+	// Extract the current value
+	vol, err := client.StoragePoolVolumeTypeGet(pool, volume, defaultStoragePoolVolumeType)
+	if err != nil {
+		return err
+	}
+
+	data, err := yaml.Marshal(&vol)
+	if err != nil {
+		return err
+	}
+
+	// Spawn the editor
+	content, err := shared.TextEditor("", []byte(c.storagePoolVolumeEditHelp()+"\n\n"+string(data)))
+	if err != nil {
+		return err
+	}
+
+	for {
+		// Parse the text received from the editor
+		newdata := api.StorageVolume{}
+		err = yaml.Unmarshal(content, &newdata)
+		if err == nil {
+			err = client.StoragePoolVolumeTypePut(pool, volume, defaultStoragePoolVolumeType, newdata)
+		}
+
+		// Respawn the editor
+		if err != nil {
+			fmt.Fprintf(os.Stderr, i18n.G("Config parsing error: %s")+"\n", err)
+			fmt.Println(i18n.G("Press enter to open the editor again"))
+
+			_, err := os.Stdin.Read(make([]byte, 1))
+			if err != nil {
+				return err
+			}
+
+			content, err = shared.TextEditor("", content)
+			if err != nil {
+				return err
+			}
+			continue
+		}
+		break
+	}
+	return nil
+}
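
For reviewers, a minimal sketch of how the new client calls fit together,
mirroring "lxc storage create", "lxc storage volume create" and "lxc storage
volume attach". This is not part of the patch set; it assumes the method
signatures above, and the "custom" literal is assumed to match
defaultStoragePoolVolumeType:

    package main

    import (
        "fmt"

        "github.com/lxc/lxd"
    )

    func main() {
        cfg, err := lxd.LoadConfig()
        if err != nil {
            fmt.Println(err)
            return
        }

        client, err := lxd.NewClient(cfg, "local")
        if err != nil {
            fmt.Println(err)
            return
        }

        // lxc storage create pool1 zfs size=10GB
        err = client.StoragePoolCreate("pool1", "zfs", map[string]string{"size": "10GB"})
        if err != nil {
            fmt.Println(err)
            return
        }

        // lxc storage volume create pool1 vol1
        err = client.StoragePoolVolumeTypeCreate("pool1", "vol1", "custom", map[string]string{})
        if err != nil {
            fmt.Println(err)
            return
        }

        // lxc storage volume attach pool1 vol1 c1 /mnt/vol1
        props := []string{"pool=pool1", "path=/mnt/vol1", "source=vol1"}
        resp, err := client.ContainerDeviceAdd("c1", "vol1", "disk", props)
        if err != nil {
            fmt.Println(err)
            return
        }
        if err := client.WaitForSuccess(resp.Operation); err != nil {
            fmt.Println(err)
        }
    }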

From 47651e595b2bb8a74580921a91bf85ab8109a2bd Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Wed, 1 Feb 2017 17:24:08 +0100
Subject: [PATCH 57/63] api-extensions: add storage api

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 doc/api-extensions.md | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)

diff --git a/doc/api-extensions.md b/doc/api-extensions.md
index 32a6e43..d4f197c 100644
--- a/doc/api-extensions.md
+++ b/doc/api-extensions.md
@@ -176,3 +176,30 @@ dnsmasq is enabled on the bridge.
 
 ## network\_routes
 Introduces "ipv4.routes" and "ipv6.routes" which allow routing additional subnets to a LXD bridge.
+
+## storage
+Storage management API for LXD.
+
+This includes:
+* GET /1.0/storage-pools
+* POST /1.0/storage-pools (see rest-api.md for details)
+
+* GET /1.0/storage-pools/<pool_name> (see rest-api.md for details)
+* POST /1.0/storage-pools/<pool_name> (see rest-api.md for details)
+* PUT /1.0/storage-pools/<pool_name> (see rest-api.md for details)
+* PATCH /1.0/storage-pools/<pool_name> (see rest-api.md for details)
+* DELETE /1.0/storage-pools/<pool_name> (see rest-api.md for details)
+
+* GET /1.0/storage-pools/<pool_name>/volumes (see rest-api.md for details)
+
+* GET /1.0/storage-pools/<pool_name>/volumes/<volume_type> (see rest-api.md for details)
+* POST /1.0/storage-pools/<pool_name>/volumes/<volume_type> (see rest-api.md for details)
+
+* GET /1.0/storage-pools/<pool_name>/volumes/<volume_type>/<volume_name> (see rest-api.md for details)
+* POST /1.0/storage-pools/<pool_name>/volumes/<volume_type>/<volume_name> (see rest-api.md for details)
+* PUT /1.0/storage-pools/<pool_name>/volumes/<volume_type>/<volume_name> (see rest-api.md for details)
+* PATCH /1.0/storage-pools/<pool_name>/volumes/<volume_type>/<volume_name> (see rest-api.md for details)
+* DELETE /1.0/storage-pools/<pool_name>/volumes/<volume_type>/<volume_name> (see rest-api.md for details)
+
+* All storage configuration options (see configuration.md for details)
+* Addition of "storage_pool" property to target a specific storage pool.
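
Clients can gate on the new extension before touching any of these endpoints.
A rough sketch over the local unix socket (the socket path and the shape of
the GET /1.0 response are assumed from the standard handshake, not something
this patch adds):

    package main

    import (
        "context"
        "encoding/json"
        "fmt"
        "net"
        "net/http"
    )

    func main() {
        // Dial the LXD unix socket regardless of the URL host.
        tr := &http.Transport{
            DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
                return net.Dial("unix", "/var/lib/lxd/unix.socket")
            },
        }
        c := &http.Client{Transport: tr}

        resp, err := c.Get("http://unix/1.0")
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()

        var reply struct {
            Metadata struct {
                APIExtensions []string `json:"api_extensions"`
            } `json:"metadata"`
        }
        if err := json.NewDecoder(resp.Body).Decode(&reply); err != nil {
            panic(err)
        }

        for _, ext := range reply.Metadata.APIExtensions {
            if ext == "storage" {
                fmt.Println("server supports the storage API")
                return
            }
        }
        fmt.Println("server predates the storage API")
    }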

From e477bec246d8c7712c6997eaaa294d1386fef1a6 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Wed, 1 Feb 2017 17:24:24 +0100
Subject: [PATCH 58/63] configuration: add storage api

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 doc/configuration.md | 41 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 41 insertions(+)

diff --git a/doc/configuration.md b/doc/configuration.md
index 74c4721..3c4bc8a 100644
--- a/doc/configuration.md
+++ b/doc/configuration.md
@@ -36,10 +36,12 @@ storage.lvm\_volume\_size       | string    | 10GiB     | -
 storage.zfs\_pool\_name         | string    | -         | -                                 | ZFS pool name
 storage.zfs\_remove\_snapshots  | boolean   | false     | storage\_zfs\_remove\_snapshots   | Automatically remove any needed snapshot when attempting a container restore
 storage.zfs\_use\_refquota      | boolean   | false     | storage\_zfs\_use\_refquota       | Don't include snapshots as part of container quota (size property) or in reported disk usage
+storage.default\_pool           | string    | -         | storage                           | The default storage pool on which to create containers.
 images.compression\_algorithm   | string    | gzip      | -                                 | Compression algorithm to use for new images (bzip2, gzip, lzma, xz or none)
 images.remote\_cache\_expiry    | integer   | 10        | -                                 | Number of days after which an unused cached remote image will be flushed
 images.auto\_update\_interval   | integer   | 6         | -                                 | Interval in hours at which to look for update to cached images (0 disables it)
 images.auto\_update\_cached     | boolean   | true      | -                                 | Whether to automatically update any image that LXD caches
+images.default\_storage\_pool   | string    | -         | storage                           | The default storage pool on which to create images.
 
 Those keys can be set using the lxc tool with:
 
@@ -374,3 +376,42 @@ raw.dnsmasq                     | string    | -                     | -
 Those keys can be set using the lxc tool with:
 
     lxc network set <network> <key> <value>
+
+# Storage configuration
+LXD supports creating and managing storage pools and storage volumes.
+General keys are top-level. Driver-specific keys are namespaced by the driver name.
+Volume keys apply to any volume created in the pool unless the value is
+overridden on a per-volume basis.
+
+## Storage pool configuration
+
+Key                         | Type   | Condition                     | Default          | Description
+:--                         | :--    | :--                           | :--              | :--
+driver                      | string | -                             | ""               | Name of the storage driver
+source                      | string | -                             | ""               | Path to block device or loop file or filesystem entry
+size                        | string | appropriate driver and source | 0                | Size of the storage pool in bytes (suffixes supported). (Currently valid for loop-based pools and zfs.)
+volume.block.mount_options  | string | appropriate driver            | discard          | Mount options for block devices
+volume.block.filesystem     | string | appropriate driver            | ext4             | Filesystem to use for new volumes
+volume.size                 | string | appropriate driver            | 0                | Default volume size
+volume.zfs.use_refquota     | bool   | zfs driver                    | false            | Use refquota instead of quota for space.
+volume.zfs.remove_snapshots | bool   | zfs driver                    | false            | Remove snapshots as needed
+volume.lvm.thinpool_name    | string | lvm driver                    | LXDPool          | Thin pool where images and containers are created.
+zfs.pool_name               | string | zfs driver                    | name of the pool | Name of the zpool
+
+Storage pool configuration keys can be set using the lxc tool with:
+
+    lxc storage set [<remote>:]<pool> <key> <value>
+
+## Storage volume configuration
+
+Key                  | Type   | Condition          | Default                             | Description
+:--                  | :--    | :--                | :--                                 | :--
+size                 | string | appropriate driver | 0                                   | Size of the storage volume
+block.mount_options  | string | appropriate driver | discard                             | Mount options for block devices
+block.filesystem     | string | appropriate driver | ext4                                | Filesystem to use for new volumes
+zfs.use_refquota     | bool   | zfs driver         | same as volume.zfs.use_refquota     | Use refquota instead of quota for space
+zfs.remove_snapshots | bool   | zfs driver         | same as volume.zfs.remove_snapshots | Remove snapshots as needed
+
+Storage volume configuration keys can be set using the lxc tool with:
+
+    lxc storage volume set [<remote>:]<pool> <volume> <key> <value>
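
In Go, both set operations reduce to a read-modify-write with the client
calls from this series; a sketch under those assumptions (the helper names
are ours, and "custom" is assumed as the volume type):

    package storageexample

    import "github.com/lxc/lxd"

    // setPoolKey mirrors "lxc storage set <pool> <key> <value>".
    func setPoolKey(client *lxd.Client, pool, key, value string) error {
        p, err := client.StoragePoolGet(pool)
        if err != nil {
            return err
        }
        p.PoolConfig[key] = value
        return client.StoragePoolPut(pool, p)
    }

    // setVolumeKey mirrors "lxc storage volume set <pool> <volume> <key> <value>".
    func setVolumeKey(client *lxd.Client, pool, volume, key, value string) error {
        v, err := client.StoragePoolVolumeTypeGet(pool, volume, "custom")
        if err != nil {
            return err
        }
        v.VolumeConfig[key] = value
        return client.StoragePoolVolumeTypePut(pool, volume, "custom", v)
    }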

From e7b6ead2f012becbef1aaa7bf05721bad1e58359 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Wed, 1 Feb 2017 18:43:31 +0100
Subject: [PATCH 59/63] rest-api: add storage api

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 doc/rest-api.md | 284 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 284 insertions(+)

diff --git a/doc/rest-api.md b/doc/rest-api.md
index a017863..b87d60f 100644
--- a/doc/rest-api.md
+++ b/doc/rest-api.md
@@ -1818,3 +1818,287 @@ Input (none at present):
     }
 
 HTTP code for this should be 202 (Accepted).
+
+## /1.0/storage-pools
+
+### GET
+
+ * Description: list of storage pools
+ * Introduced: with API extension "storage"
+ * Authentication: trusted
+ * Operation: sync
+ * Return: list of storage pools that are currently defined on the host
+
+
+    [
+        {
+            "pool_name": "pool4",
+            "driver": "btrfs",
+            "used_by": [],
+            "config": {
+                "size": "61205515468",
+                "source": "/home/chb/mnt/lxd_clean/disks/pool4.img",
+                "volume.size": "0"
+            }
+        },
+        {
+            "pool_name": "test",
+            "driver": "lvm",
+            "used_by": [
+                "/1.0/containers/alp1"
+                "/1.0/snapshots/alp1/snap0",
+                "/1.0/snapshots/alp1/snap1",
+                "/1.0/snapshots/alp1/snap2",
+                "/1.0/containers/alp2",
+                "/1.0/containers/alp3",
+                "/1.0/images/cedce20b5b236f1071134beba7a5fd2aa923fda49eea4c66454dd559a5d6e906"
+            ]
+        }
+    ]
+
+### POST
+
+ * Description: create a new storage pool
+ * Introduced: with API extension "storage"
+ * Authentication: trusted
+ * Operation: sync
+ * Return: standard return value or standard error
+
+Input:
+
+    {
+        "config": {
+            "size": "10GB"
+        },
+        "driver": "zfs",
+        "pool_name": "pool1"
+    }
+
+## /1.0/storage-pools/<pool_name>
+
+### GET
+
+ * Description: information about a storage pool
+ * Introduced: with API extension "storage"
+ * Authentication: trusted
+ * Operation: sync
+ * Return: dict representing a storage pool
+
+    {
+        "type": "sync",
+        "status": "success",
+        "status_code": 200,
+        "operation": "",
+        "error_code": 0,
+        "error": "",
+        "metadata": {
+            "pool_name": "pool1",
+            "driver": "lvm",
+            "used_by": [
+                "/1.0/containers/alp1",
+                "/1.0/snapshots/alp1/snap0",
+                "/1.0/snapshots/alp1/snap1",
+                "/1.0/snapshots/alp1/snap2",
+                "/1.0/containers/alp2",
+                "/1.0/containers/alp3",
+                "/1.0/images/cedce20b5b236f1071134beba7a5fd2aa923fda49eea4c66454dd559a5d6e906",
+                "/1.0/custom/vol1"
+                ],
+            "config": {
+                "size": "15032385536",
+                "source": "pool1",
+                "volume.block.filesystem": "xfs",
+                "volume.block.mount_options": "discard",
+                "volume.lvm.thinpool_name": "lxdpool",
+                "volume.size": "10737418240"
+            }
+        }
+    }
+
+
+### POST
+
+ * Description: rename a storage pool
+ * Note: currently not implemented.
+
+### PUT (ETag supported)
+
+ * Description: replace the storage pool information
+ * Introduced: with API extension "storage"
+ * Authentication: trusted
+ * Operation: sync
+ * Return: standard return value or standard error
+
+Input:
+
+    {
+        "config": {
+            "size": "15032385536",
+            "source": "pool1",
+            "volume.block.filesystem": "xfs",
+            "volume.block.mount_options": "discard",
+            "volume.lvm.thinpool_name": "LXDPool",
+            "volume.size": "10737418240"
+        }
+    }
+
+### PATCH
+
+ * Description: update the storage pool configuration
+ * Introduced: with API extension "storage"
+ * Authentication: trusted
+ * Operation: sync
+ * Return: standard return value or standard error
+
+Input:
+
+    {
+        "config": {
+            "volume.block.filesystem": "xfs",
+        }
+    }
+
+### DELETE
+
+ * Description: delete a storage pool
+ * Introduced: with API extension "storage"
+ * Authentication: trusted
+ * Operation: sync
+ * Return: standard return value or standard error
+
+Input (none at present):
+
+    {
+    }
+
+## /1.0/storage-pools/<pool_name>/volumes
+
+### GET
+
+ * Description: list of storage volumes
+ * Introduced: with API extension "storage"
+ * Authentication: trusted
+ * Operation: sync
+ * Return: list of storage volumes that currently exist on a given storage pool
+
+    [
+        {
+            "volume_type": "container",
+            "used_by": [],
+            "volume_name": "alp1/snap1",
+            "config": {}
+        },
+        {
+            "volume_type": "container",
+            "used_by": [],
+            "volume_name": "alp1/snap2",
+            "config": {}
+        }
+    ]
+
+## /1.0/storage-pools/<pool_name>/volumes/<volume_type>
+
+### POST
+
+ * Description: create a new storage volume on a given storage pool
+ * Introduced: with API extension "storage"
+ * Authentication: trusted
+ * Operation: sync
+ * Return: standard return value or standard error
+
+Input:
+
+    {
+        "config": {},
+        "pool_name": "pool1",
+        "volume_name": "vol1",
+        "volume_type": "custom"
+    }
+
+
+## /1.0/storage-pools/<pool_name>/volumes/<volume_type>/<volume_name>
+
+### GET
+
+ * Description: information about a storage volume of a given type on a storage pool
+ * Introduced: with API extension "storage"
+ * Authentication: trusted
+ * Operation: sync
+ * Return: dict representing a storage volume
+
+    {
+        "type": "sync",
+        "status": "Success",
+        "status_code": 200,
+        "operation": "",
+        "error_code": 0,
+        "error": "",
+        "metadata": {
+            "volume_type": "custom",
+            "used_by": [],
+            "volume_name": "vol1",
+            "config": {
+                "block.filesystem": "ext4",
+                "block.mount_options": "discard",
+                "lvm.thinpool_name": "LXDPool",
+                "size": "10737418240"
+            }
+        }
+    }
+
+
+### POST
+
+ * Description: rename a storage volume of a given type on a storage pool
+ * Note: currently not implemented.
+
+### PUT (ETag supported)
+
+ * Description: replace the storage volume information
+ * Introduced: with API extension "storage"
+ * Authentication: trusted
+ * Operation: sync
+ * Return: standard return value or standard error
+
+Input:
+
+    {
+        "config": {
+            "size": "15032385536",
+            "source": "pool1",
+            "used_by": "",
+            "volume.block.filesystem": "xfs",
+            "volume.block.mount_options": "discard",
+            "volume.lvm.thinpool_name": "LXDPool",
+            "volume.size": "10737418240"
+        }
+    }
+
+### PATCH (ETag supported)
+
+ * Description: update the storage volume information
+ * Introduced: with API extension "storage"
+ * Authentication: trusted
+ * Operation: sync
+ * Return: standard return value or standard error
+
+Input:
+
+    {
+        "config": {
+            "volume.block.mount_options": "",
+        }
+    }
+
+### DELETE
+
+ * Description: delete a storage volume of a given type on a given storage pool
+ * Introduced: with API extension "storage"
+ * Authentication: trusted
+ * Operation: sync
+ * Return: standard return value or standard error
+
+Input (none at present):
+
+    {
+    }
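
For clients speaking the wire format directly, pool creation is plain JSON
against the endpoint above. A sketch (local unix-socket path assumed; field
names taken from the POST /1.0/storage-pools example):

    package main

    import (
        "bytes"
        "context"
        "encoding/json"
        "fmt"
        "net"
        "net/http"
    )

    func main() {
        tr := &http.Transport{
            DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
                return net.Dial("unix", "/var/lib/lxd/unix.socket")
            },
        }
        c := &http.Client{Transport: tr}

        // Request body mirrors the documented POST /1.0/storage-pools input.
        body, err := json.Marshal(map[string]interface{}{
            "pool_name": "pool1",
            "driver":    "zfs",
            "config":    map[string]string{"size": "10GB"},
        })
        if err != nil {
            panic(err)
        }

        resp, err := c.Post("http://unix/1.0/storage-pools", "application/json", bytes.NewReader(body))
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
        fmt.Println(resp.Status)
    }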

From ea9bb899fa5ab4740871ba803d34c46d54e2a2e7 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Wed, 11 Jan 2017 12:49:04 +0100
Subject: [PATCH 60/63] silence lxd-benchmark

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 test/lxd-benchmark/main.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/lxd-benchmark/main.go b/test/lxd-benchmark/main.go
index e8f619c..44f63f3 100644
--- a/test/lxd-benchmark/main.go
+++ b/test/lxd-benchmark/main.go
@@ -166,7 +166,7 @@ func spawnContainers(c *lxd.Client, count int, image string, privileged bool) er
 		config["user.lxd-benchmark"] = "true"
 
 		// Create
-		resp, err := c.Init(name, "local", fingerprint, nil, config, nil, false)
+		resp, err := c.Init(name, "local", fingerprint, nil, config, nil, "", false)
 		if err != nil {
 			logf(fmt.Sprintf("Failed to spawn container '%s': %s", name, err))
 			return

From cd4432c400350d3e937cf99cf5bc395abc2921e3 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Thu, 2 Feb 2017 10:51:09 +0100
Subject: [PATCH 61/63] doc/database: add storage api

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 doc/database.md | 35 +++++++++++++++++++++++++++++++++++
 1 file changed, 35 insertions(+)

diff --git a/doc/database.md b/doc/database.md
index fd7de4f..fad5803 100644
--- a/doc/database.md
+++ b/doc/database.md
@@ -298,3 +298,38 @@ version         | INTEGER       | -             | NOT NULL          | Schema ver
 updated\_at     | DATETIME      | -             | NOT NULL          | When the schema update was done
 
 Index: UNIQUE ON id AND version
+
+## storage\_pools
+
+Column                  | Type          | Default       | Constraint        | Description
+:-----                  | :---          | :------       | :---------        | :----------
+id                      | INTEGER       | SERIAL        | NOT NULL          | SERIAL
+pool\_name              | VARCHAR(255)  | -             | NOT NULL          | storage pool name
+driver                  | VARCHAR(255)  | -             | NOT NULL          | storage pool driver
+
+Index: UNIQUE ON id AND pool\_name
+
+## storage\_pools\_config
+
+Column                  | Type          | Default       | Constraint        | Description
+:-----                  | :---          | :------       | :---------        | :----------
+id                      | INTEGER       | SERIAL        | NOT NULL          | SERIAL
+storage\_pool\_id       | INTEGER       | -             | NOT NULL          | storage\_pools.id FK
+key                     | VARCHAR(255)  | -             | NOT NULL          | Configuration key
+value                   | TEXT          | -             |                   | Configuration value (NULL for unset)
+
+Index: UNIQUE ON id AND storage\_pool\_id AND key
+
+## storage\_volumes
+
+Column                  | Type          | Default       | Constraint        | Description
+:-----                  | :---          | :------       | :---------        | :----------
+id                      | INTEGER       | SERIAL        | NOT NULL          | SERIAL
+storage\_pool\_id       | INTEGER       | -             | NOT NULL          | storage\_pools.id FK
+volume\_name            | VARCHAR(255)  | -             | NOT NULL          | storage volume name
+storage\_volume\_type   | INTEGER       | -             | NOT NULL          | storage volume type
+
+Index: UNIQUE ON id AND storage\_pool\_id AND volume\_name AND storage\_volume\_type
+
+## storage\_volumes\_config
+
+Column                  | Type          | Default       | Constraint        | Description
+:-----                  | :---          | :------       | :---------        | :----------
+id                      | INTEGER       | SERIAL        | NOT NULL          | SERIAL
+storage\_volume\_id     | INTEGER       | -             | NOT NULL          | storage\_volumes.id FK
+key                     | VARCHAR(255)  | -             | NOT NULL          | Configuration key
+value                   | TEXT          | -             |                   | Configuration value (NULL for unset)
+
+Index: UNIQUE ON id AND storage\_volume\_id AND key
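
A sketch of reading these tables back with database/sql (column names come
from the tables above; "value" is nullable, so NULL is scanned through
sql.NullString and treated as unset):

    package db

    import "database/sql"

    // storagePoolConfig loads the config of a single pool from
    // storage_pools_config.
    func storagePoolConfig(db *sql.DB, poolID int64) (map[string]string, error) {
        rows, err := db.Query("SELECT key, value FROM storage_pools_config WHERE storage_pool_id=?", poolID)
        if err != nil {
            return nil, err
        }
        defer rows.Close()

        config := map[string]string{}
        for rows.Next() {
            var key string
            var value sql.NullString
            if err := rows.Scan(&key, &value); err != nil {
                return nil, err
            }
            config[key] = value.String // NULL (unset key) becomes ""
        }
        return config, rows.Err()
    }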

From b8b5e7d14fd72aac6df4118da71cf560aed17ccc Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Thu, 2 Feb 2017 10:51:32 +0100
Subject: [PATCH 62/63] doc: first comments for storage api

A full expansion will follow after the storage api is merged.

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 doc/storage-backends.md | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/doc/storage-backends.md b/doc/storage-backends.md
index c3db780..2cf2281 100644
--- a/doc/storage-backends.md
+++ b/doc/storage-backends.md
@@ -18,7 +18,12 @@ Nesting support                             | yes       | yes   | no    | no
 Restore from older snapshots (not latest)   | yes       | yes   | yes   | no
 Storage quotas                              | no        | yes   | no    | yes
 
-## Mixed storage
+With the implementation of the new storage API it is possible to use multiple
+storage drivers (e.g. btrfs and zfs) at the same time. Most of the sections
+below apply only to pre-storage-api LXD instances; they are kept for reference
+and marked "pre-storage-api".
+
+## Mixed storage (pre-storage-api)
 When switching storage backend after some containers or images already exist, LXD will create any new container  
 using the new backend and convert older images to the new backend as needed.
 
@@ -34,13 +39,13 @@ rsync is used to transfer the container content across.
    all the others due to it having to unpack images or do instant copies of
    containers, snapshots and images.
 
-### Btrfs
+### Btrfs (pre-storage-api)
 
  - The btrfs backend is automatically used if /var/lib/lxd is on a btrfs filesystem.
  - Uses a subvolume per container, image and snapshot, creating btrfs snapshots when creating a new object.
  - When using for nesting, the host btrfs filesystem must be mounted with the "user\_subvol\_rm\_allowed" mount option.
 
-### LVM
+### LVM (pre-storage-api)
 
  - A LVM VG must be created and then storage.lvm\_vg\_name set to point to it.
  - If a thinpool doesn't already exist, one will be created, the name of the thinpool can be set with storage.lvm\_thinpool\_name .
@@ -48,7 +53,7 @@ rsync is used to transfer the container content across.
  - The filesystem used for the LVs is ext4 (can be configured to use xfs instead).
  - LVs are created with a default size of 10GiB (can be configured through).
 
-### ZFS
+### ZFS (pre-storage-api)
 
  - LXD can use any zpool or part of a zpool. storage.zfs\_pool\_name must be set to the path to be used.
  - ZFS doesn't have to (and shouldn't be) mounted on /var/lib/lxd

From d50b3e40db9e2b05782c77e7f119364f65623a6d Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Thu, 2 Feb 2017 10:52:54 +0100
Subject: [PATCH 63/63] make i18n

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 po/lxd.pot | 179 +++++++++++++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 144 insertions(+), 35 deletions(-)

diff --git a/po/lxd.pot b/po/lxd.pot
index 9d0b8a4..7b9695a 100644
--- a/po/lxd.pot
+++ b/po/lxd.pot
@@ -7,7 +7,7 @@
 msgid   ""
 msgstr  "Project-Id-Version: lxd\n"
         "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-        "POT-Creation-Date: 2017-01-24 18:22-0500\n"
+        "POT-Creation-Date: 2017-02-02 11:33+0100\n"
         "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
         "Last-Translator: FULL NAME <EMAIL at ADDRESS>\n"
         "Language-Team: LANGUAGE <LL at li.org>\n"
@@ -32,6 +32,35 @@ msgstr  ""
 msgid   "  Network usage:"
 msgstr  ""
 
+#: lxc/storage.go:33
+msgid   "### This is a yaml representation of a storage pool.\n"
+        "### Any line starting with a '# will be ignored.\n"
+        "###\n"
+        "### A storage pool consists of a set of configuration items.\n"
+        "###\n"
+        "### An example would look like:\n"
+        "### pool_name: default\n"
+        "### driver: zfs\n"
+        "### used_by: []\n"
+        "### config:\n"
+        "###   size: \"61203283968\"\n"
+        "###   source: /home/chb/mnt/lxd_test/default.img\n"
+        "###   zfs.pool_name: default"
+msgstr  ""
+
+#: lxc/storage.go:50
+msgid   "### This is a yaml representation of a storage volume.\n"
+        "### Any line starting with a '# will be ignored.\n"
+        "###\n"
+        "### A storage volume consists of a set of configuration items.\n"
+        "###\n"
+        "### volume_name: vol1\n"
+        "### volume_type: custom\n"
+        "### used_by: []\n"
+        "### config:\n"
+        "###   size: \"61203283968\""
+msgstr  ""
+
 #: lxc/config.go:37
 msgid   "### This is a yaml representation of the configuration.\n"
         "### Any line starting with a '# will be ignored.\n"
@@ -182,7 +211,7 @@ msgstr  ""
 msgid   "Can't unset key '%s', it's not currently set."
 msgstr  ""
 
-#: lxc/network.go:390 lxc/profile.go:424
+#: lxc/network.go:390 lxc/profile.go:424 lxc/storage.go:528
 msgid   "Cannot provide container name to list"
 msgstr  ""
 
@@ -212,11 +241,11 @@ msgstr  ""
 msgid   "Columns"
 msgstr  ""
 
-#: lxc/copy.go:32 lxc/copy.go:33 lxc/init.go:136 lxc/init.go:137
+#: lxc/copy.go:32 lxc/copy.go:33 lxc/init.go:137 lxc/init.go:138
 msgid   "Config key/value to apply to the new container"
 msgstr  ""
 
-#: lxc/config.go:535 lxc/config.go:600 lxc/image.go:737 lxc/network.go:346 lxc/profile.go:218
+#: lxc/config.go:535 lxc/config.go:600 lxc/image.go:737 lxc/network.go:346 lxc/profile.go:218 lxc/storage.go:484 lxc/storage.go:844
 #, c-format
 msgid   "Config parsing error: %s"
 msgstr  ""
@@ -229,7 +258,7 @@ msgstr  ""
 msgid   "Container name is mandatory"
 msgstr  ""
 
-#: lxc/copy.go:141 lxc/copy.go:242 lxc/init.go:230
+#: lxc/copy.go:141 lxc/copy.go:242 lxc/init.go:243
 #, c-format
 msgid   "Container name is: %s"
 msgstr  ""
@@ -288,12 +317,12 @@ msgstr  ""
 msgid   "Created: %s"
 msgstr  ""
 
-#: lxc/init.go:181 lxc/launch.go:127
+#: lxc/init.go:184 lxc/launch.go:136
 #, c-format
 msgid   "Creating %s"
 msgstr  ""
 
-#: lxc/init.go:179
+#: lxc/init.go:182
 msgid   "Creating the container"
 msgstr  ""
 
@@ -301,6 +330,10 @@ msgstr  ""
 msgid   "DESCRIPTION"
 msgstr  ""
 
+#: lxc/storage.go:563
+msgid   "DRIVER"
+msgstr  ""
+
 #: lxc/publish.go:38
 msgid   "Define a compression algorithm: for image or none"
 msgstr  ""
@@ -323,7 +356,7 @@ msgstr  ""
 msgid   "Device %s removed from %s"
 msgstr  ""
 
-#: lxc/list.go:574
+#: lxc/list.go:575
 msgid   "EPHEMERAL"
 msgstr  ""
 
@@ -347,7 +380,7 @@ msgstr  ""
 msgid   "Environment:"
 msgstr  ""
 
-#: lxc/copy.go:36 lxc/copy.go:37 lxc/init.go:140 lxc/init.go:141
+#: lxc/copy.go:36 lxc/copy.go:37 lxc/init.go:141 lxc/init.go:142
 msgid   "Ephemeral container"
 msgstr  ""
 
@@ -449,10 +482,10 @@ msgstr  ""
 msgid   "Importing the image: %s"
 msgstr  ""
 
-#: lxc/init.go:74
+#: lxc/init.go:75
 msgid   "Initialize a container from a particular image.\n"
         "\n"
-        "lxc init [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n <network>]\n"
+        "lxc init [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n <network>] [--storage|-s <pool>]\n"
         "\n"
         "Initializes a container using the specified image and name.\n"
         "\n"
@@ -514,7 +547,7 @@ msgstr  ""
 #: lxc/launch.go:23
 msgid   "Launch a container from a particular image.\n"
         "\n"
-        "lxc launch [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n <network>]\n"
+        "lxc launch [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n <network>] [--storage|-s <pool>]\n"
         "\n"
         "Launches a container using the specified image and name.\n"
         "\n"
@@ -733,6 +766,46 @@ msgid   "Manage remote LXD servers.\n"
         "lxc remote get-default                                                      Print the default remote."
 msgstr  ""
 
+#: lxc/storage.go:64
+msgid   "Manage storage.\n"
+        "\n"
+        "lxc storage list [<remote>:]                           List available storage pools.\n"
+        "lxc storage show [<remote>:]<pool>                     Show details of a storage pool.\n"
+        "lxc storage create [<remote>:]<pool> [key=value]...    Create a storage pool.\n"
+        "lxc storage get [<remote>:]<pool> <key>                Get storage pool configuration.\n"
+        "lxc storage set [<remote>:]<pool> <key> <value>        Set storage pool configuration.\n"
+        "lxc storage unset [<remote>:]<pool> <key>              Unset storage pool configuration.\n"
+        "lxc storage delete [<remote>:]<pool>                   Delete a storage pool.\n"
+        "lxc storage edit [<remote>:]<pool>\n"
+        "    Edit storage pool, either by launching external editor or reading STDIN.\n"
+        "    Example: lxc storage edit [<remote>:]<pool> # launch editor\n"
+        "             cat pool.yaml | lxc storage edit [<remote>:]<pool> # read from pool.yaml\n"
+        "\n"
+        "lxc storage attach [<remote>:]<pool> <container> [device name]\n"
+        "lxc storage attach-profile [<remote:>]<pool> <profile> [device name]\n"
+        "\n"
+        "lxc storage detach <pool> <container> [device name]\n"
+        "lxc storage detach-profile <pool> <container> [device name]\n"
+        "\n"
+        "lxc storage volume list [<remote>:]<pool>                              List available storage volumes on a storage pool.\n"
+        "lxc storage volume show [<remote>:]<pool> <volume>		       Show details of a storage volume on a storage pool.\n"
+        "lxc storage volume create [<remote>:]<pool> <volume> [key=value]...    Create a storage volume on a storage pool.\n"
+        "lxc storage volume get [<remote>:]<pool> <volume> <key>                Get storage volume configuration on a storage pool.\n"
+        "lxc storage volume set [<remote>:]<pool> <volume> <key> <value>        Set storage volume configuration on a storage pool.\n"
+        "lxc storage volume unset [<remote>:]<pool> <volume> <key>              Unset storage volume configuration on a storage pool.\n"
+        "lxc storage volume delete [<remote>:]<pool> <volume>                   Delete a storage volume on a storage pool.\n"
+        "lxc storage volume edit [<remote>:]<pool> <volume>\n"
+        "    Edit storage pool, either by launching external editor or reading STDIN.\n"
+        "    Example: lxc storage volume edit [<remote>:]<pool> <volume> # launch editor\n"
+        "             cat pool.yaml | lxc storage volume edit [<remote>:]<pool> <volume> # read from pool.yaml\n"
+        "\n"
+        "lxc storage volume attach [<remote>:]<pool> <volume> <container> [device name] <path>\n"
+        "lxc storage volume attach-profile [<remote:>]<pool> <volume> <profile> [device name] <path>\n"
+        "\n"
+        "lxc storage volume detach [<remote>:]<pool> <volume> <container> [device name]\n"
+        "lxc storage volume detach-profile [<remote:>]<pool> <volume> <profile> [device name]\n"
+msgstr  ""
+
 #: lxc/image.go:96
 msgid   "Manipulate container images.\n"
         "\n"
@@ -827,7 +900,7 @@ msgid   "Monitor activity on the LXD server.\n"
         "    lxc monitor --type=logging"
 msgstr  ""
 
-#: lxc/network.go:216 lxc/network.go:265
+#: lxc/network.go:216 lxc/network.go:265 lxc/storage.go:315 lxc/storage.go:411
 msgid   "More than one device matches, specify the device name."
 msgstr  ""
 
@@ -852,7 +925,7 @@ msgstr  ""
 msgid   "Must supply container name for: "
 msgstr  ""
 
-#: lxc/list.go:430 lxc/network.go:426 lxc/profile.go:451 lxc/remote.go:380
+#: lxc/list.go:430 lxc/network.go:426 lxc/profile.go:451 lxc/remote.go:380 lxc/storage.go:562 lxc/storage.go:653 lxc/storage.go:692
 msgid   "NAME"
 msgstr  ""
 
@@ -875,7 +948,7 @@ msgstr  ""
 msgid   "Network %s deleted"
 msgstr  ""
 
-#: lxc/init.go:142 lxc/init.go:143
+#: lxc/init.go:143 lxc/init.go:144
 msgid   "Network name"
 msgstr  ""
 
@@ -891,6 +964,10 @@ msgstr  ""
 msgid   "No device found for this network"
 msgstr  ""
 
+#: lxc/storage.go:324 lxc/storage.go:420
+msgid   "No device found for this storage volume."
+msgstr  ""
+
 #: lxc/config.go:308
 msgid   "No fingerprint specified."
 msgstr  ""
@@ -920,7 +997,7 @@ msgstr  ""
 msgid   "Override the terminal mode (auto, interactive or non-interactive)"
 msgstr  ""
 
-#: lxc/list.go:576
+#: lxc/list.go:577
 msgid   "PERSISTENT"
 msgstr  ""
 
@@ -965,7 +1042,7 @@ msgstr  ""
 msgid   "Pid: %d"
 msgstr  ""
 
-#: lxc/network.go:347 lxc/profile.go:219
+#: lxc/network.go:347 lxc/profile.go:219 lxc/storage.go:485 lxc/storage.go:845
 msgid   "Press enter to open the editor again"
 msgstr  ""
 
@@ -1020,7 +1097,7 @@ msgstr  ""
 msgid   "Profile %s removed from %s"
 msgstr  ""
 
-#: lxc/copy.go:34 lxc/copy.go:35 lxc/init.go:138 lxc/init.go:139
+#: lxc/copy.go:34 lxc/copy.go:35 lxc/init.go:139 lxc/init.go:140
 msgid   "Profile to apply to the new container"
 msgstr  ""
 
@@ -1095,12 +1172,12 @@ msgid   "Restore a container's state to a previous snapshot.\n"
         "    lxc restore u1 snap0"
 msgstr  ""
 
-#: lxc/init.go:240
+#: lxc/init.go:253
 #, c-format
 msgid   "Retrieving image: %s"
 msgstr  ""
 
-#: lxc/image.go:646
+#: lxc/image.go:646 lxc/storage.go:564 lxc/storage.go:694
 msgid   "SIZE"
 msgstr  ""
 
@@ -1108,6 +1185,10 @@ msgstr  ""
 msgid   "SNAPSHOTS"
 msgstr  ""
 
+#: lxc/storage.go:565
+msgid   "SOURCE"
+msgstr  ""
+
 #: lxc/list.go:434
 msgid   "STATE"
 msgstr  ""
@@ -1116,6 +1197,10 @@ msgstr  ""
 msgid   "STATIC"
 msgstr  ""
 
+#: lxc/list.go:436
+msgid   "STORAGE POOL"
+msgstr  ""
+
 #: lxc/remote.go:225
 msgid   "Server certificate NACKed by user"
 msgstr  ""
@@ -1169,7 +1254,7 @@ msgstr  ""
 msgid   "Source:"
 msgstr  ""
 
-#: lxc/launch.go:136
+#: lxc/launch.go:145
 #, c-format
 msgid   "Starting %s"
 msgstr  ""
@@ -1187,6 +1272,30 @@ msgstr  ""
 msgid   "Stopping container failed!"
 msgstr  ""
 
+#: lxc/storage.go:385
+#, c-format
+msgid   "Storage pool %s created"
+msgstr  ""
+
+#: lxc/storage.go:435
+#, c-format
+msgid   "Storage pool %s deleted"
+msgstr  ""
+
+#: lxc/init.go:145 lxc/init.go:146
+msgid   "Storage pool name"
+msgstr  ""
+
+#: lxc/storage.go:716
+#, c-format
+msgid   "Storage volume %s created"
+msgstr  ""
+
+#: lxc/storage.go:725
+#, c-format
+msgid   "Storage volume %s deleted"
+msgstr  ""
+
 #: lxc/action.go:46
 msgid   "Store the container state (only for stop)"
 msgstr  ""
@@ -1199,7 +1308,7 @@ msgstr  ""
 msgid   "Swap (peak)"
 msgstr  ""
 
-#: lxc/list.go:435 lxc/network.go:427
+#: lxc/list.go:435 lxc/network.go:427 lxc/storage.go:654 lxc/storage.go:693
 msgid   "TYPE"
 msgstr  ""
 
@@ -1211,7 +1320,7 @@ msgstr  ""
 msgid   "The container is currently running. Use --force to have it stopped and restarted."
 msgstr  ""
 
-#: lxc/init.go:313
+#: lxc/init.go:326
 msgid   "The container you are starting doesn't have any network attached to it."
 msgstr  ""
 
@@ -1219,7 +1328,7 @@ msgstr  ""
 msgid   "The device doesn't exist"
 msgstr  ""
 
-#: lxc/init.go:297
+#: lxc/init.go:310
 #, c-format
 msgid   "The local image '%s' couldn't be found, trying '%s:' instead."
 msgstr  ""
@@ -1228,7 +1337,7 @@ msgstr  ""
 msgid   "The opposite of `lxc pause` is `lxc start`."
 msgstr  ""
 
-#: lxc/network.go:230 lxc/network.go:279
+#: lxc/network.go:230 lxc/network.go:279 lxc/storage.go:329 lxc/storage.go:425
 msgid   "The specified device doesn't exist"
 msgstr  ""
 
@@ -1248,11 +1357,11 @@ msgstr  ""
 msgid   "Timestamps:"
 msgstr  ""
 
-#: lxc/init.go:315
+#: lxc/init.go:328
 msgid   "To attach a network to a container, use: lxc network attach"
 msgstr  ""
 
-#: lxc/init.go:314
+#: lxc/init.go:327
 msgid   "To create a new network, use: lxc network create"
 msgstr  ""
 
@@ -1265,7 +1374,7 @@ msgstr  ""
 msgid   "Transferring image: %s"
 msgstr  ""
 
-#: lxc/action.go:100 lxc/launch.go:149
+#: lxc/action.go:100 lxc/launch.go:158
 #, c-format
 msgid   "Try `lxc info --show-log %s` for more info"
 msgstr  ""
@@ -1286,7 +1395,7 @@ msgstr  ""
 msgid   "URL"
 msgstr  ""
 
-#: lxc/network.go:429 lxc/profile.go:452
+#: lxc/network.go:429 lxc/profile.go:452 lxc/storage.go:566 lxc/storage.go:655 lxc/storage.go:695
 msgid   "USED BY"
 msgstr  ""
 
@@ -1328,7 +1437,7 @@ msgstr  ""
 msgid   "`lxc config profile` is deprecated, please use `lxc profile`"
 msgstr  ""
 
-#: lxc/launch.go:120
+#: lxc/launch.go:129
 msgid   "bad number of things scanned from image, container or snapshot"
 msgstr  ""
 
@@ -1356,7 +1465,7 @@ msgstr  ""
 msgid   "default"
 msgstr  ""
 
-#: lxc/copy.go:132 lxc/copy.go:137 lxc/copy.go:233 lxc/copy.go:238 lxc/init.go:220 lxc/init.go:225 lxc/launch.go:104 lxc/launch.go:109
+#: lxc/copy.go:132 lxc/copy.go:137 lxc/copy.go:233 lxc/copy.go:238 lxc/init.go:233 lxc/init.go:238 lxc/launch.go:113 lxc/launch.go:118
 msgid   "didn't get any affected image, container or snapshot from server"
 msgstr  ""
 
@@ -1378,7 +1487,7 @@ msgstr  ""
 msgid   "error: unknown command: %s"
 msgstr  ""
 
-#: lxc/launch.go:124
+#: lxc/launch.go:133
 msgid   "got bad version"
 msgstr  ""
 
@@ -1394,7 +1503,7 @@ msgstr  ""
 msgid   "ok (y/n)?"
 msgstr  ""
 
-#: lxc/main.go:304 lxc/main.go:308
+#: lxc/main.go:305 lxc/main.go:309
 #, c-format
 msgid   "processing aliases failed %s\n"
 msgstr  ""
@@ -1436,11 +1545,11 @@ msgstr  ""
 msgid   "taken at %s"
 msgstr  ""
 
-#: lxc/exec.go:186
+#: lxc/exec.go:189
 msgid   "unreachable return reached"
 msgstr  ""
 
-#: lxc/main.go:236
+#: lxc/main.go:237
 msgid   "wrong number of subcommand arguments"
 msgstr  ""
 

