[lxc-devel] [lxd/master] Add btrfs storage driver
monstermunchkin on Github
lxc-bot at linuxcontainers.org
Mon Nov 18 14:27:32 UTC 2019
A non-text attachment was scrubbed...
Name: not available
Type: text/x-mailbox
Size: 301 bytes
Desc: not available
URL: <http://lists.linuxcontainers.org/pipermail/lxc-devel/attachments/20191118/30eb4e45/attachment-0001.bin>
-------------- next part --------------
From 52bbc8b5e62784d1f34f7fa28da374d5d66f45c5 Mon Sep 17 00:00:00 2001
From: Thomas Hipp <thomas.hipp at canonical.com>
Date: Thu, 14 Nov 2019 10:59:51 +0100
Subject: [PATCH 1/4] lxd/storage: Move storage_cgo.go to drivers package
Signed-off-by: Thomas Hipp <thomas.hipp at canonical.com>
---
lxd/storage/{storage_cgo.go => drivers/driver_cgo.go} | 6 +++---
lxd/storage_btrfs.go | 2 +-
lxd/storage_lvm.go | 7 ++++---
3 files changed, 8 insertions(+), 7 deletions(-)
rename lxd/storage/{storage_cgo.go => drivers/driver_cgo.go} (98%)
diff --git a/lxd/storage/storage_cgo.go b/lxd/storage/drivers/driver_cgo.go
similarity index 98%
rename from lxd/storage/storage_cgo.go
rename to lxd/storage/drivers/driver_cgo.go
index 879e94ff54..fd066095e1 100644
--- a/lxd/storage/storage_cgo.go
+++ b/lxd/storage/drivers/driver_cgo.go
@@ -1,7 +1,7 @@
// +build linux
// +build cgo
-package storage
+package drivers
import (
"fmt"
@@ -29,8 +29,8 @@ import (
#include <sys/stat.h>
#include <sys/types.h>
-#include "../include/macro.h"
-#include "../include/memory_utils.h"
+#include "../../include/macro.h"
+#include "../../include/memory_utils.h"
#define LXD_MAXPATH 4096
#define LXD_NUMSTRLEN64 21
diff --git a/lxd/storage_btrfs.go b/lxd/storage_btrfs.go
index 1d31793083..335ebd5a51 100644
--- a/lxd/storage_btrfs.go
+++ b/lxd/storage_btrfs.go
@@ -426,7 +426,7 @@ func (s *storageBtrfs) StoragePoolMount() (bool, error) {
// Since we mount the loop device LO_FLAGS_AUTOCLEAR is
// fine since the loop device will be kept around for as
// long as the mount exists.
- loopF, loopErr := driver.PrepareLoopDev(source, driver.LoFlagsAutoclear)
+ loopF, loopErr := drivers.PrepareLoopDev(source, drivers.LoFlagsAutoclear)
if loopErr != nil {
return false, loopErr
}
diff --git a/lxd/storage_lvm.go b/lxd/storage_lvm.go
index ce7aa01137..62b7421946 100644
--- a/lxd/storage_lvm.go
+++ b/lxd/storage_lvm.go
@@ -18,6 +18,7 @@ import (
"github.com/lxc/lxd/lxd/project"
"github.com/lxc/lxd/lxd/rsync"
driver "github.com/lxc/lxd/lxd/storage"
+ "github.com/lxc/lxd/lxd/storage/drivers"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
"github.com/lxc/lxd/shared/ioprogress"
@@ -395,7 +396,7 @@ func (s *storageLvm) StoragePoolDelete() error {
if s.loopInfo != nil {
// Set LO_FLAGS_AUTOCLEAR before we remove the loop file
// otherwise we will get EBADF.
- err = driver.SetAutoclearOnLoopDev(int(s.loopInfo.Fd()))
+ err = drivers.SetAutoclearOnLoopDev(int(s.loopInfo.Fd()))
if err != nil {
logger.Warnf("Failed to set LO_FLAGS_AUTOCLEAR on loop device: %s, manual cleanup needed", err)
}
@@ -467,12 +468,12 @@ func (s *storageLvm) StoragePoolMount() (bool, error) {
if filepath.IsAbs(source) && !shared.IsBlockdevPath(source) {
// Try to prepare new loop device.
- loopF, loopErr := driver.PrepareLoopDev(source, 0)
+ loopF, loopErr := drivers.PrepareLoopDev(source, 0)
if loopErr != nil {
return false, loopErr
}
// Make sure that LO_FLAGS_AUTOCLEAR is unset.
- loopErr = driver.UnsetAutoclearOnLoopDev(int(loopF.Fd()))
+ loopErr = drivers.UnsetAutoclearOnLoopDev(int(loopF.Fd()))
if loopErr != nil {
return false, loopErr
}
From 5d4ced5218e338af759102ba4c8033d8184eec6a Mon Sep 17 00:00:00 2001
From: Thomas Hipp <thomas.hipp at canonical.com>
Date: Mon, 18 Nov 2019 15:18:18 +0100
Subject: [PATCH 2/4] lxd/storage/drivers: Move FS and mount functions
This moves FS and mount functions to the drivers package.
Signed-off-by: Thomas Hipp <thomas.hipp at canonical.com>
---
lxd/storage/drivers/utils.go | 96 ++++++++++++++++++++++++++++++++++++
1 file changed, 96 insertions(+)
diff --git a/lxd/storage/drivers/utils.go b/lxd/storage/drivers/utils.go
index 71a496c72a..f47b8191eb 100644
--- a/lxd/storage/drivers/utils.go
+++ b/lxd/storage/drivers/utils.go
@@ -5,6 +5,7 @@ import (
"io/ioutil"
"os"
"path/filepath"
+ "strings"
"time"
"golang.org/x/sys/unix"
@@ -14,6 +15,11 @@ import (
"github.com/lxc/lxd/shared/api"
)
+// MkfsOptions represents options for filesystem creation.
+type MkfsOptions struct {
+ Label string
+}
+
func wipeDirectory(path string) error {
// List all entries
entries, err := ioutil.ReadDir(path)
@@ -235,3 +241,93 @@ func createSparseFile(filePath string, sizeBytes int64) error {
return nil
}
+
+// MakeFSType creates the provided filesystem.
+func MakeFSType(path string, fsType string, options *MkfsOptions) (string, error) {
+ var err error
+ var msg string
+
+ fsOptions := options
+ if fsOptions == nil {
+ fsOptions = &MkfsOptions{}
+ }
+
+ cmd := []string{fmt.Sprintf("mkfs.%s", fsType), path}
+ if fsOptions.Label != "" {
+ cmd = append(cmd, "-L", fsOptions.Label)
+ }
+
+ if fsType == "ext4" {
+ cmd = append(cmd, "-E", "nodiscard,lazy_itable_init=0,lazy_journal_init=0")
+ }
+
+ msg, err = shared.TryRunCommand(cmd[0], cmd[1:]...)
+ if err != nil {
+ return msg, err
+ }
+
+ return "", nil
+}
+
+// Export the mount options map since we might find it useful in other parts of
+// LXD.
+type mountOptions struct {
+ capture bool
+ flag uintptr
+}
+
+// MountOptions represents a list of possible mount options.
+var MountOptions = map[string]mountOptions{
+ "async": {false, unix.MS_SYNCHRONOUS},
+ "atime": {false, unix.MS_NOATIME},
+ "bind": {true, unix.MS_BIND},
+ "defaults": {true, 0},
+ "dev": {false, unix.MS_NODEV},
+ "diratime": {false, unix.MS_NODIRATIME},
+ "dirsync": {true, unix.MS_DIRSYNC},
+ "exec": {false, unix.MS_NOEXEC},
+ "lazytime": {true, unix.MS_LAZYTIME},
+ "mand": {true, unix.MS_MANDLOCK},
+ "noatime": {true, unix.MS_NOATIME},
+ "nodev": {true, unix.MS_NODEV},
+ "nodiratime": {true, unix.MS_NODIRATIME},
+ "noexec": {true, unix.MS_NOEXEC},
+ "nomand": {false, unix.MS_MANDLOCK},
+ "norelatime": {false, unix.MS_RELATIME},
+ "nostrictatime": {false, unix.MS_STRICTATIME},
+ "nosuid": {true, unix.MS_NOSUID},
+ "rbind": {true, unix.MS_BIND | unix.MS_REC},
+ "relatime": {true, unix.MS_RELATIME},
+ "remount": {true, unix.MS_REMOUNT},
+ "ro": {true, unix.MS_RDONLY},
+ "rw": {false, unix.MS_RDONLY},
+ "strictatime": {true, unix.MS_STRICTATIME},
+ "suid": {false, unix.MS_NOSUID},
+ "sync": {true, unix.MS_SYNCHRONOUS},
+}
+
+// resolveMountOptions resolves the provided mount options.
+func resolveMountOptions(options string) (uintptr, string) {
+ mountFlags := uintptr(0)
+ tmp := strings.SplitN(options, ",", -1)
+ for i := 0; i < len(tmp); i++ {
+ opt := tmp[i]
+ do, ok := MountOptions[opt]
+ if !ok {
+ continue
+ }
+
+ if do.capture {
+ mountFlags |= do.flag
+ } else {
+ mountFlags &= ^do.flag
+ }
+
+ copy(tmp[i:], tmp[i+1:])
+ tmp[len(tmp)-1] = ""
+ tmp = tmp[:len(tmp)-1]
+ i--
+ }
+
+ return mountFlags, strings.Join(tmp, ",")
+}
From dbeef7e28356b9121a8a915d24447b09a32c850b Mon Sep 17 00:00:00 2001
From: Thomas Hipp <thomas.hipp at canonical.com>
Date: Mon, 18 Nov 2019 15:20:30 +0100
Subject: [PATCH 3/4] lxd/storage: Mark fs/mount functions as deprecated
This marks some FS and mount functions as deprecated as they have been
moved to the drivers package.
Signed-off-by: Thomas Hipp <thomas.hipp at canonical.com>
---
lxd/storage/utils.go | 3 +++
1 file changed, 3 insertions(+)
diff --git a/lxd/storage/utils.go b/lxd/storage/utils.go
index 25e8e273fd..faa77878a6 100644
--- a/lxd/storage/utils.go
+++ b/lxd/storage/utils.go
@@ -31,6 +31,7 @@ var baseDirectories = map[drivers.VolumeType][]string{
var VolumeUsedByInstancesWithProfiles func(s *state.State, poolName string, volumeName string, volumeTypeName string, runningOnly bool) ([]string, error)
// MkfsOptions represents options for filesystem creation.
+// Deprecated: Use drivers.MkfsOptions.
type MkfsOptions struct {
Label string
}
@@ -73,6 +74,7 @@ var MountOptions = map[string]mountOptions{
}
// LXDResolveMountoptions resolves the provided mount options.
+// Deprecated: Use resolveMountOptions from the drivers package instead.
func LXDResolveMountoptions(options string) (uintptr, string) {
mountFlags := uintptr(0)
tmp := strings.SplitN(options, ",", -1)
@@ -225,6 +227,7 @@ func LXDUsesPool(dbObj *db.Cluster, onDiskPoolName string, driver string, onDisk
}
// MakeFSType creates the provided filesystem.
+// Deprecated: Use drivers.MakeFSType.
func MakeFSType(path string, fsType string, options *MkfsOptions) (string, error) {
var err error
var msg string
From 89052290142543986d19eeba71269c829b1aa30b Mon Sep 17 00:00:00 2001
From: Thomas Hipp <thomas.hipp at canonical.com>
Date: Mon, 18 Nov 2019 15:24:41 +0100
Subject: [PATCH 4/4] lxd/storage/drivers: Add btrfs
This adds the btrfs storage driver.
Signed-off-by: Thomas Hipp <thomas.hipp at canonical.com>
---
lxd/storage/drivers/driver_btrfs.go | 740 ++++++++++++++++++++++
lxd/storage/drivers/driver_btrfs_utils.go | 380 +++++++++++
lxd/storage/drivers/load.go | 1 +
3 files changed, 1121 insertions(+)
create mode 100644 lxd/storage/drivers/driver_btrfs.go
create mode 100644 lxd/storage/drivers/driver_btrfs_utils.go
diff --git a/lxd/storage/drivers/driver_btrfs.go b/lxd/storage/drivers/driver_btrfs.go
new file mode 100644
index 0000000000..c26d7d75a9
--- /dev/null
+++ b/lxd/storage/drivers/driver_btrfs.go
@@ -0,0 +1,740 @@
+package drivers
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+
+ "golang.org/x/sys/unix"
+
+ "github.com/lxc/lxd/lxd/migration"
+ "github.com/lxc/lxd/lxd/operations"
+ "github.com/lxc/lxd/lxd/storage/quota"
+ "github.com/lxc/lxd/shared"
+ "github.com/lxc/lxd/shared/api"
+ log "github.com/lxc/lxd/shared/log15"
+ "github.com/lxc/lxd/shared/units"
+)
+
+var btrfsVersion string
+var btrfsLoaded bool
+
+type btrfs struct {
+ common
+
+ remount uintptr
+}
+
+func (d *btrfs) load() error {
+ if btrfsLoaded {
+ return nil
+ }
+
+ // Validate the required binaries.
+ for _, tool := range []string{"btrfs"} {
+ _, err := exec.LookPath(tool)
+ if err != nil {
+ return fmt.Errorf("Required tool '%s' is missing", tool)
+ }
+ }
+
+ // Detect and record the version.
+ if btrfsVersion == "" {
+ out, err := shared.RunCommand("btrfs", "version")
+ if err != nil {
+ return err
+ }
+
+ count, err := fmt.Sscanf(strings.SplitN(out, " ", 2)[1], "v%s\n", &btrfsVersion)
+ if err != nil || count != 1 {
+ return fmt.Errorf("The 'btrfs' tool isn't working properly")
+ }
+ }
+
+ btrfsLoaded = true
+ return nil
+}
+
+// Info returns info about the driver and its environment.
+func (d *btrfs) Info() Info {
+ return Info{
+ Name: "btrfs",
+ Version: btrfsVersion,
+ OptimizedImages: false,
+ PreservesInodes: !d.state.OS.RunningInUserNS,
+ Remote: false,
+ VolumeTypes: []VolumeType{VolumeTypeCustom, VolumeTypeImage, VolumeTypeContainer, VolumeTypeVM},
+ BlockBacking: false,
+ RunningQuotaResize: true,
+ }
+}
+
+func (d *btrfs) Create() error {
+ // WARNING: The Create() function cannot rely on any of the struct attributes being set.
+
+ // Set default source if missing.
+ defaultSource := filepath.Join(shared.VarPath("disks"), fmt.Sprintf("%s.img", d.name))
+ source := d.config["source"]
+
+ if source == "" {
+ source = defaultSource
+ d.config["source"] = source
+ } else if strings.HasPrefix(source, "/") {
+ source = shared.HostPath(source)
+ } else {
+ return fmt.Errorf("Invalid \"source\" property")
+ }
+
+ poolMntPoint := GetPoolMountPath(d.name)
+ isBlockDev := false
+
+ if source == defaultSource {
+ size, err := units.ParseByteSizeString(d.config["size"])
+ if err != nil {
+ return err
+ }
+
+ err = createSparseFile(source, size)
+ if err != nil {
+ return fmt.Errorf("Failed to create sparse file %q: %s", source, err)
+ }
+
+ output, err := MakeFSType(source, "btrfs", &MkfsOptions{Label: d.name})
+ if err != nil {
+ return fmt.Errorf("Failed to create btrfs: %v (%s)", err, output)
+ }
+ } else {
+ isBlockDev = shared.IsBlockdevPath(source)
+
+ if isBlockDev {
+ output, err := MakeFSType(source, "btrfs", &MkfsOptions{Label: d.name})
+ if err != nil {
+ return fmt.Errorf("Failed to create btrfs: %v (%s)", err, output)
+ }
+ } else {
+ if isBtrfsSubvolume(source) {
+ subvols, err := btrfsSubvolumesGet(source)
+ if err != nil {
+					return fmt.Errorf("Could not determine if existing btrfs subvolume is empty: %s", err)
+ }
+ if len(subvols) > 0 {
+ return fmt.Errorf("Requested btrfs subvolume exists but is not empty")
+ }
+ } else {
+ cleanSource := filepath.Clean(source)
+ lxdDir := shared.VarPath()
+
+ if shared.PathExists(source) && !isOnBtrfs(source) {
+ return fmt.Errorf("Existing path is neither a btrfs subvolume nor does it reside on a btrfs filesystem")
+ } else if strings.HasPrefix(cleanSource, lxdDir) {
+ if cleanSource != poolMntPoint {
+ return fmt.Errorf("btrfs subvolumes requests in LXD directory %q are only valid under %q\n(e.g. source=%s)", shared.VarPath(), shared.VarPath("storage-pools"), poolMntPoint)
+ } else if d.state.OS.BackingFS != "btrfs" {
+ return fmt.Errorf("Creation of btrfs subvolume requested but %q does not reside on btrfs filesystem", source)
+ }
+ }
+
+ err := btrfsSubvolumeCreate(source)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ }
+
+ var err error
+ var devUUID string
+ mountFlags, mountOptions := resolveMountOptions(d.getMountOptions())
+ mountFlags |= d.remount
+
+ if isBlockDev {
+ devUUID, _ = shared.LookupUUIDByBlockDevPath(source)
+ // The symlink might not have been created even with the delay
+ // we granted it above. So try to call btrfs filesystem show and
+ // parse it out. (I __hate__ this!)
+ if devUUID == "" {
+ devUUID, err = btrfsLookupFsUUID(source)
+ if err != nil {
+ return err
+ }
+ }
+ d.config["source"] = devUUID
+
+ // If the symlink in /dev/disk/by-uuid hasn't been created yet
+ // aka we only detected it by parsing btrfs filesystem show, we
+ // cannot call StoragePoolMount() since it will try to do the
+ // reverse operation. So instead we shamelessly mount using the
+ // block device path at the time of pool creation.
+ err = tryMount(source, GetPoolMountPath(d.name), "btrfs", mountFlags, mountOptions)
+ } else {
+ _, err = d.Mount()
+ }
+ if err != nil {
+ return err
+ }
+
+ // Create default subvolumes.
+ subvolumes := []string{
+ filepath.Join(poolMntPoint, "containers"),
+ filepath.Join(poolMntPoint, "containers-snapshots"),
+ filepath.Join(poolMntPoint, "custom"),
+ filepath.Join(poolMntPoint, "custom-snapshots"),
+ filepath.Join(poolMntPoint, "images"),
+ filepath.Join(poolMntPoint, "virtual-machines"),
+ filepath.Join(poolMntPoint, "virtual-machines-snapshots"),
+ }
+
+ for _, subvol := range subvolumes {
+ err := btrfsSubvolumeCreate(subvol)
+ if err != nil {
+ return fmt.Errorf("Could not create btrfs subvolume: %s", subvol)
+ }
+ }
+
+ return nil
+}
+
+// Delete removes the storage pool from the storage device.
+func (d *btrfs) Delete(op *operations.Operation) error {
+ source := d.config["source"]
+
+ if source == "" {
+ return fmt.Errorf("no \"source\" property found for the storage pool")
+ }
+
+ if strings.HasPrefix(source, "/") {
+ source = shared.HostPath(d.config["source"])
+ }
+
+ poolMntPoint := GetPoolMountPath(d.name)
+
+ // Delete default subvolumes.
+ subvolumes := []string{
+ filepath.Join(poolMntPoint, "containers"),
+ filepath.Join(poolMntPoint, "containers-snapshots"),
+ filepath.Join(poolMntPoint, "custom"),
+ filepath.Join(poolMntPoint, "custom-snapshots"),
+ filepath.Join(poolMntPoint, "images"),
+ filepath.Join(poolMntPoint, "virtual-machines"),
+ filepath.Join(poolMntPoint, "virtual-machines-snapshots"),
+ }
+
+ for _, subvol := range subvolumes {
+ err := btrfsSubvolumesDelete(subvol)
+ if err != nil {
+ return fmt.Errorf("Could not delete btrfs subvolume: %s", subvol)
+ }
+ }
+
+ _, err := d.Unmount()
+ if err != nil {
+ return err
+ }
+
+ if filepath.IsAbs(source) {
+ var err error
+ cleanSource := filepath.Clean(source)
+ sourcePath := shared.VarPath("disks", d.name)
+ loopFilePath := sourcePath + ".img"
+
+ if cleanSource == loopFilePath {
+ // This is a loop file so simply remove it.
+ err = os.Remove(source)
+ } else {
+ if !isBtrfsFilesystem(source) && isBtrfsSubvolume(source) {
+ err = btrfsSubvolumesDelete(source)
+ }
+ }
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ }
+
+ return wipeDirectory(poolMntPoint)
+}
+
+// Mount mounts the storage pool.
+func (d *btrfs) Mount() (bool, error) {
+	source := d.config["source"]
+
+	if source == "" {
+		return false, fmt.Errorf("no \"source\" property found for the storage pool")
+	}
+
+	if strings.HasPrefix(source, "/") {
+		source = shared.HostPath(d.config["source"])
+	}
+
+	poolMntPoint := GetPoolMountPath(d.name)
+
+	// Check whether the mount point poolMntPoint exists.
+	if !shared.PathExists(poolMntPoint) {
+		err := os.MkdirAll(poolMntPoint, 0711)
+		if err != nil {
+			return false, err
+		}
+	}
+
+	if shared.IsMountPoint(poolMntPoint) && (d.remount&unix.MS_REMOUNT) == 0 {
+		return false, nil
+	}
+
+	mountFlags, mountOptions := resolveMountOptions(d.getMountOptions())
+	mountSource := source
+	isBlockDev := shared.IsBlockdevPath(source)
+
+	if filepath.IsAbs(source) {
+		cleanSource := filepath.Clean(source)
+		loopFilePath := shared.VarPath("disks", d.name+".img")
+
+		if !isBlockDev && cleanSource == loopFilePath {
+			// If source == "${LXD_DIR}"/disks/{pool_name} it is a
+			// loop file we're dealing with.
+			//
+			// Since we mount the loop device LO_FLAGS_AUTOCLEAR is
+			// fine since the loop device will be kept around for as
+			// long as the mount exists.
+			loopF, loopErr := PrepareLoopDev(source, LoFlagsAutoclear)
+			if loopErr != nil {
+				return false, loopErr
+			}
+			mountSource = loopF.Name()
+			defer loopF.Close()
+		} else if !isBlockDev && cleanSource != poolMntPoint {
+			mountSource = source
+			mountFlags |= unix.MS_BIND
+		} else if !isBlockDev && cleanSource == poolMntPoint && d.state.OS.BackingFS == "btrfs" {
+			return false, nil
+		}
+		// User is using block device path.
+	} else {
+		// Try to lookup the disk device by UUID but don't fail. If we
+		// don't find one this might just mean we have been given the
+		// UUID of a subvolume.
+		byUUID := fmt.Sprintf("/dev/disk/by-uuid/%s", source)
+		diskPath, err := os.Readlink(byUUID)
+		if err == nil {
+			mountSource = filepath.Join("/dev", filepath.Base(diskPath)) // not strings.Trim: it takes a cutset, not a prefix
+		} else {
+			// We have very likely been given a subvolume UUID. In
+			// this case we should simply assume that the user has
+			// mounted the parent of the subvolume or the subvolume
+			// itself. Otherwise this becomes a really messy
+			// detection task.
+			return false, nil
+		}
+	}
+
+	mountFlags |= d.remount
+	err := unix.Mount(mountSource, poolMntPoint, "btrfs", mountFlags, mountOptions)
+	if err != nil {
+		return false, err
+	}
+
+	return true, nil
+}
+
+// Unmount unmounts the storage pool.
+func (d *btrfs) Unmount() (bool, error) {
+ poolMntPoint := GetPoolMountPath(d.name)
+ return forceUnmount(poolMntPoint)
+}
+
+func (d *btrfs) GetResources() (*api.ResourcesStoragePool, error) {
+ // Use the generic VFS resources.
+ return vfsResources(GetPoolMountPath(d.name))
+}
+
+// GetVolumeUsage returns the disk space used by the volume.
+func (d *btrfs) GetVolumeUsage(volType VolumeType, volName string) (int64, error) {
+ return btrfsPoolVolumeQGroupUsage(GetVolumeMountPath(d.name, volType, volName))
+}
+
+// ValidateVolume validates the supplied volume config.
+func (d *btrfs) ValidateVolume(vol Volume, removeUnknownKeys bool) error {
+ return d.validateVolume(vol, nil, removeUnknownKeys)
+}
+
+// HasVolume indicates whether a specific volume exists on the storage pool.
+func (d *btrfs) HasVolume(volType VolumeType, volName string) bool {
+ return isBtrfsSubvolume(GetVolumeMountPath(d.name, volType, volName))
+}
+
+// GetVolumeDiskPath returns the location and file format of a disk volume.
+func (d *btrfs) GetVolumeDiskPath(volType VolumeType, volName string) (string, error) {
+ return filepath.Join(GetVolumeMountPath(d.name, volType, volName), "root.img"), nil
+}
+
+// CreateVolume creates an empty volume and can optionally fill it by executing the supplied
+// filler function.
+func (d *btrfs) CreateVolume(vol Volume, filler func(mountPath, rootBlockPath string) error, op *operations.Operation) error {
+ volPath := vol.MountPath()
+
+ err := btrfsSubvolumeCreate(volPath)
+ if err != nil {
+ return err
+ }
+
+ revertSubvolume := true
+ defer func() {
+ if revertSubvolume {
+ btrfsSubvolumeDelete(volPath)
+ }
+ }()
+
+ // Extract specified size from pool or volume config.
+ size := d.config["volume.size"]
+ if vol.config["size"] != "" {
+ size = vol.config["size"]
+ }
+
+ // Create sparse loopback file if volume is block.
+ rootBlockPath := ""
+ if vol.contentType == ContentTypeBlock {
+ // We expect the filler to copy the VM image into this path.
+ rootBlockPath, err = d.GetVolumeDiskPath(vol.volType, vol.name)
+ if err != nil {
+ return err
+ }
+ } else {
+ // Get the volume ID for the new volume, which is used to set project quota.
+ volID, err := d.getVolID(vol.volType, vol.name)
+ if err != nil {
+ return err
+ }
+
+ // Initialise the volume's quota using the volume ID.
+ err = d.initQuota(volPath, volID)
+ if err != nil {
+ return err
+ }
+
+ defer func() {
+ if revertSubvolume {
+ d.deleteQuota(volPath, volID)
+ }
+ }()
+
+ // Set the quota.
+ err = d.setQuota(volPath, volID, size)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Run the volume filler function if supplied.
+ if filler != nil {
+ err = filler(volPath, rootBlockPath)
+ if err != nil {
+ return err
+ }
+ }
+
+ // If we are creating a block volume, resize it to the requested size or 10GB.
+ // We expect the filler function to have converted the qcow2 image to raw into the rootBlockPath.
+ if vol.contentType == ContentTypeBlock {
+ blockSize := size
+ if blockSize == "" {
+ blockSize = "10GB"
+ }
+
+ blockSizeBytes, err := units.ParseByteSizeString(blockSize)
+ if err != nil {
+ return err
+ }
+
+ if shared.PathExists(rootBlockPath) {
+ _, err = shared.RunCommand("qemu-img", "resize", "-f", "raw", rootBlockPath, fmt.Sprintf("%d", blockSizeBytes))
+ if err != nil {
+ return fmt.Errorf("Failed resizing disk image %s to size %s: %v", rootBlockPath, blockSize, err)
+ }
+ } else {
+ // If rootBlockPath doesn't exist, then there has been no filler function
+ // supplied to create it from another source. So instead create an empty
+ // volume (use for PXE booting a VM).
+ _, err = shared.RunCommand("qemu-img", "create", "-f", "raw", rootBlockPath, fmt.Sprintf("%d", blockSizeBytes))
+ if err != nil {
+ return fmt.Errorf("Failed creating disk image %s as size %s: %v", rootBlockPath, blockSize, err)
+ }
+ }
+ }
+
+ revertSubvolume = false
+ return nil
+}
+
+// MigrateVolume sends a volume for migration.
+func (d *btrfs) MigrateVolume(vol Volume, conn io.ReadWriteCloser, volSrcArgs migration.VolumeSourceArgs, op *operations.Operation) error {
+ return ErrNotImplemented
+}
+
+// CreateVolumeFromMigration creates a volume being sent via a migration.
+func (d *btrfs) CreateVolumeFromMigration(vol Volume, conn io.ReadWriteCloser, volTargetArgs migration.VolumeTargetArgs, op *operations.Operation) error {
+ return ErrNotImplemented
+}
+
+// CreateVolumeFromCopy provides same-pool volume copying functionality.
+func (d *btrfs) CreateVolumeFromCopy(vol Volume, srcVol Volume, copySnapshots bool, op *operations.Operation) error {
+ err := btrfsSubvolumesSnapshotCreate(srcVol.MountPath(), vol.MountPath(), false, true, d.state.OS.RunningInUserNS)
+ if err != nil {
+ return err
+ }
+
+ if !copySnapshots || srcVol.IsSnapshot() {
+ return nil
+ }
+
+ snapshots, err := d.VolumeSnapshots(srcVol.volType, srcVol.name, op)
+ if err != nil {
+ return err
+ }
+
+ if len(snapshots) == 0 {
+ return nil
+ }
+
+ for _, snap := range snapshots {
+ srcSnapshot := GetVolumeMountPath(d.name, srcVol.volType, GetSnapshotVolumeName(srcVol.name, snap))
+ dstSnapshot := GetVolumeMountPath(d.name, vol.volType, GetSnapshotVolumeName(vol.name, snap))
+
+ err = btrfsSubvolumeSnapshotCreate(srcSnapshot, dstSnapshot, true, d.state.OS.RunningInUserNS)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// VolumeSnapshots returns a list of snapshots for the volume.
+func (d *btrfs) VolumeSnapshots(volType VolumeType, volName string, op *operations.Operation) ([]string, error) {
+ return btrfsSubvolumeSnapshotsGet(GetVolumeSnapshotDir(d.name, volType, volName))
+}
+
+// UpdateVolume applies config changes to the volume.
+func (d *btrfs) UpdateVolume(vol Volume, changedConfig map[string]string) error {
+ if vol.contentType != ContentTypeFS {
+ return fmt.Errorf("Content type not supported")
+ }
+
+ if vol.volType != VolumeTypeCustom {
+ return fmt.Errorf("Volume type not supported")
+ }
+
+ return d.SetVolumeQuota(vol.volType, vol.name, vol.config["size"], nil)
+}
+
+// RenameVolume renames a volume and its snapshots.
+func (d *btrfs) RenameVolume(volType VolumeType, volName string, newVolName string, op *operations.Operation) error {
+ srcVolumePath := GetVolumeMountPath(d.name, volType, volName)
+ dstVolumePath := GetVolumeMountPath(d.name, volType, newVolName)
+
+ err := os.Rename(srcVolumePath, dstVolumePath)
+ if err != nil {
+ return err
+ }
+
+ srcSnapshotDir := GetVolumeSnapshotDir(d.name, volType, volName)
+ dstSnapshotDir := GetVolumeSnapshotDir(d.name, volType, newVolName)
+
+ err = os.Rename(srcSnapshotDir, dstSnapshotDir)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// RestoreVolume restores a volume from a snapshot.
+func (d *btrfs) RestoreVolume(vol Volume, snapshotName string, op *operations.Operation) error {
+ source := GetVolumeMountPath(d.name, vol.volType, GetSnapshotVolumeName(vol.name, snapshotName))
+
+ return btrfsSubvolumesSnapshotCreate(source, vol.MountPath(), false, true, d.state.OS.RunningInUserNS)
+}
+
+// DeleteVolume deletes a volume of the storage device. If any snapshots of the volume remain then
+// this function will return an error.
+func (d *btrfs) DeleteVolume(volType VolumeType, volName string, op *operations.Operation) error {
+ snapshots, err := d.VolumeSnapshots(volType, volName, op)
+ if err != nil {
+ return err
+ }
+
+ if len(snapshots) > 0 {
+ return fmt.Errorf("Cannot remove a volume that has snapshots")
+ }
+
+ volPath := GetVolumeMountPath(d.name, volType, volName)
+
+ if !shared.PathExists(volPath) || !isBtrfsSubvolume(volPath) {
+ return nil
+ }
+
+ err = btrfsSubvolumeDelete(volPath)
+ if err != nil {
+ return err
+ }
+
+	// Although the volume snapshot directory should already be removed, remove
+	// it again here just in case the top-level directory was left behind.
+ err = deleteParentSnapshotDirIfEmpty(d.name, volType, volName)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// MountVolume simulates mounting a volume. As the btrfs driver doesn't require
+// mounting volumes, it returns false indicating that there is no need to issue an unmount.
+func (d *btrfs) MountVolume(volType VolumeType, volName string, op *operations.Operation) (bool, error) {
+ return false, nil
+}
+
+// MountVolumeSnapshot sets up a read-only mount on top of the snapshot to avoid accidental modifications.
+func (d *btrfs) MountVolumeSnapshot(volType VolumeType, volName, snapshotName string, op *operations.Operation) (bool, error) {
+ return false, ErrNotImplemented
+}
+
+// UnmountVolume simulates unmounting a volume. As the btrfs driver doesn't require
+// unmounting volumes, it returns false indicating the volume was already unmounted.
+func (d *btrfs) UnmountVolume(volType VolumeType, volName string, op *operations.Operation) (bool, error) {
+ return false, nil
+}
+
+// UnmountVolumeSnapshot removes the read-only mount placed on top of a snapshot.
+func (d *btrfs) UnmountVolumeSnapshot(volType VolumeType, volName, snapshotName string, op *operations.Operation) (bool, error) {
+ return false, nil
+}
+
+// SetVolumeQuota sets the quota on the volume.
+func (d *btrfs) SetVolumeQuota(volType VolumeType, volName, size string, op *operations.Operation) error {
+ volPath := GetVolumeMountPath(d.name, volType, volName)
+
+ volID, err := d.getVolID(volType, volName)
+ if err != nil {
+ return err
+ }
+
+ return d.setQuota(volPath, volID, size)
+}
+
+// quotaProjectID generates a project quota ID from a volume ID.
+func (d *btrfs) quotaProjectID(volID int64) uint32 {
+ return uint32(volID + 10000)
+}
+
+// initQuota initialises the project quota on the path. The volID generates a quota project ID.
+func (d *btrfs) initQuota(path string, volID int64) error {
+ if volID == 0 {
+ return fmt.Errorf("Missing volume ID")
+ }
+
+ ok, err := quota.Supported(path)
+ if err != nil || !ok {
+		// Skipping quota as underlying filesystem doesn't support project quotas.
+ return nil
+ }
+
+ err = quota.SetProject(path, d.quotaProjectID(volID))
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// setQuota sets the project quota on the path. The volID generates a quota project ID.
+func (d *btrfs) setQuota(path string, volID int64, size string) error {
+ if volID == 0 {
+ return fmt.Errorf("Missing volume ID")
+ }
+
+ // If size not specified in volume config, then use pool's default volume.size setting.
+ if size == "" || size == "0" {
+ size = d.config["volume.size"]
+ }
+
+ sizeBytes, err := units.ParseByteSizeString(size)
+ if err != nil {
+ return err
+ }
+
+ ok, err := quota.Supported(path)
+ if err != nil || !ok {
+ if sizeBytes > 0 {
+			// Skipping quota as underlying filesystem doesn't support project quotas.
+ d.logger.Warn("The backing filesystem doesn't support quotas, skipping quota", log.Ctx{"path": path})
+ }
+ return nil
+ }
+
+ err = quota.SetProjectQuota(path, d.quotaProjectID(volID), sizeBytes)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// deleteQuota removes the project quota for a volID from a path.
+func (d *btrfs) deleteQuota(path string, volID int64) error {
+ if volID == 0 {
+ return fmt.Errorf("Missing volume ID")
+ }
+
+ ok, err := quota.Supported(path)
+ if err != nil || !ok {
+		// Skipping quota as underlying filesystem doesn't support project quotas.
+ return nil
+ }
+
+ err = quota.SetProject(path, 0)
+ if err != nil {
+ return err
+ }
+
+ err = quota.SetProjectQuota(path, d.quotaProjectID(volID), 0)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// CreateVolumeSnapshot creates a snapshot of a volume. The snapshot is stored
+// under the volume's snapshot directory, consistent with DeleteVolumeSnapshot.
+func (d *btrfs) CreateVolumeSnapshot(volType VolumeType, volName string, newSnapshotName string, op *operations.Operation) error {
+	sourcePath := GetVolumeMountPath(d.name, volType, volName)
+	targetPath := GetVolumeMountPath(d.name, volType, GetSnapshotVolumeName(volName, newSnapshotName))
+	return btrfsSubvolumesSnapshotCreate(sourcePath, targetPath, true, true, d.state.OS.RunningInUserNS)
+}
+
+// DeleteVolumeSnapshot removes a snapshot from the storage device. The volName and snapshotName
+// must be bare names and should not be in the format "volume/snapshot".
+func (d *btrfs) DeleteVolumeSnapshot(volType VolumeType, volName string, snapshotName string, op *operations.Operation) error {
+ fullSnapshotName := GetSnapshotVolumeName(volName, snapshotName)
+ return btrfsSubvolumesDelete(GetVolumeMountPath(d.name, volType, fullSnapshotName))
+}
+
+// RenameVolumeSnapshot renames a volume snapshot.
+func (d *btrfs) RenameVolumeSnapshot(volType VolumeType, volName string, snapshotName string, newSnapshotName string, op *operations.Operation) error {
+ oldFullSnapshotName := GetSnapshotVolumeName(volName, snapshotName)
+ newFullSnapshotName := GetSnapshotVolumeName(volName, newSnapshotName)
+
+ oldPath := GetVolumeMountPath(d.name, volType, oldFullSnapshotName)
+ newPath := GetVolumeMountPath(d.name, volType, newFullSnapshotName)
+
+ return os.Rename(oldPath, newPath)
+}
+
+func (d *btrfs) getMountOptions() string {
+ if d.config["btrfs.mount_options"] != "" {
+ return d.config["btrfs.mount_options"]
+ }
+
+ return "user_subvol_rm_allowed"
+}
diff --git a/lxd/storage/drivers/driver_btrfs_utils.go b/lxd/storage/drivers/driver_btrfs_utils.go
new file mode 100644
index 0000000000..47e5ec2b93
--- /dev/null
+++ b/lxd/storage/drivers/driver_btrfs_utils.go
@@ -0,0 +1,380 @@
+package drivers
+
+import (
+ "fmt"
+ "os"
+ "path"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/lxc/lxd/lxd/util"
+ "github.com/lxc/lxd/shared"
+ "github.com/lxc/lxd/shared/logger"
+ "golang.org/x/sys/unix"
+)
+
+// errBtrfsNoQuota is returned by btrfsSubvolumeQGroup when qgroup information
+// cannot be queried, i.e. quotas are not enabled on the filesystem.
+var errBtrfsNoQuota = fmt.Errorf("Quotas disabled on filesystem")
+
+// errBtrfsNoQGroup is returned when no qgroup row could be parsed for a
+// subvolume even though quotas appear to be enabled.
+var errBtrfsNoQGroup = fmt.Errorf("Unable to find quota group")
+
+// btrfsSubvolumeCreate creates the btrfs subvolume at subvol, creating any
+// missing parent directories first.
+func btrfsSubvolumeCreate(subvol string) error {
+	// Ensure the parent directory exists (0711 allows traversal without
+	// exposing directory contents).
+	parentDir := filepath.Dir(subvol)
+	if !shared.PathExists(parentDir) {
+		if err := os.MkdirAll(parentDir, 0711); err != nil {
+			return err
+		}
+	}
+
+	// Create the subvolume itself via the btrfs tool.
+	_, err := shared.RunCommand("btrfs", "subvolume", "create", subvol)
+	if err != nil {
+		logger.Errorf("Failed to create BTRFS subvolume \"%s\": %v", subvol, err)
+		return err
+	}
+
+	return nil
+}
+
+// btrfsSubvolumesGet returns the relative paths of all btrfs subvolumes found
+// below path (the base path itself is excluded).
+func btrfsSubvolumesGet(path string) ([]string, error) {
+	if !strings.HasSuffix(path, "/") {
+		path += "/"
+	}
+
+	result := []string{}
+
+	// Walk the tree. Unprivileged users can't get to fs internals, so walk
+	// errors are deliberately skipped rather than aborting.
+	filepath.Walk(path, func(fpath string, fi os.FileInfo, err error) error {
+		switch {
+		case err != nil:
+			// Skip walk errors.
+		case strings.TrimRight(fpath, "/") == strings.TrimRight(path, "/"):
+			// Ignore the base path.
+		case !fi.IsDir():
+			// Subvolumes can only be directories.
+		case isBtrfsSubvolume(fpath):
+			// Record the subvolume relative to the base path.
+			result = append(result, strings.TrimPrefix(fpath, path))
+		}
+
+		return nil
+	})
+
+	return result, nil
+}
+
+// btrfsSubvolumeSnapshotsGet returns the relative paths of all btrfs subvolume
+// snapshots found below path (the base path itself is excluded).
+func btrfsSubvolumeSnapshotsGet(path string) ([]string, error) {
+	if !strings.HasSuffix(path, "/") {
+		path += "/"
+	}
+
+	result := []string{}
+
+	// Walk the tree. Unprivileged users can't get to fs internals, so walk
+	// errors are deliberately skipped rather than aborting.
+	filepath.Walk(path, func(fpath string, fi os.FileInfo, err error) error {
+		switch {
+		case err != nil:
+			// Skip walk errors.
+		case strings.TrimRight(fpath, "/") == strings.TrimRight(path, "/"):
+			// Ignore the base path.
+		case !fi.IsDir():
+			// Subvolumes can only be directories.
+		case isBtrfsSubvolumeSnapshot(fpath):
+			// Record the snapshot relative to the base path.
+			result = append(result, strings.TrimPrefix(fpath, path))
+		}
+
+		return nil
+	})
+
+	return result, nil
+}
+
+// isBtrfsSubvolume returns true if the given Path is a btrfs subvolume else
+// false.
+func isBtrfsSubvolume(subvolPath string) bool {
+ fs := unix.Stat_t{}
+ err := unix.Lstat(subvolPath, &fs)
+ if err != nil {
+ return false
+ }
+
+ // Check if BTRFS_FIRST_FREE_OBJECTID
+ if fs.Ino != 256 {
+ return false
+ }
+
+ return true
+}
+
+// isBtrfsSubvolumeSnapshot returns true if the given Path is a btrfs subvolume
+// snapshot else false.
+func isBtrfsSubvolumeSnapshot(subvolPath string) bool {
+ fs := unix.Stat_t{}
+ err := unix.Lstat(subvolPath, &fs)
+ if err != nil {
+ return false
+ }
+
+ return fs.Ino == 258
+}
+
+func isOnBtrfs(path string) bool {
+ fs := unix.Statfs_t{}
+
+ err := unix.Statfs(path, &fs)
+ if err != nil {
+ return false
+ }
+
+ if fs.Type != util.FilesystemSuperMagicBtrfs {
+ return false
+ }
+
+ return true
+}
+
+// btrfsLookupFsUUID returns the UUID of the btrfs filesystem containing fs,
+// as parsed from "btrfs filesystem show --raw" output.
+func btrfsLookupFsUUID(fs string) (string, error) {
+	output, err := shared.RunCommand(
+		"btrfs",
+		"filesystem",
+		"show",
+		"--raw",
+		fs)
+	if err != nil {
+		return "", fmt.Errorf("failed to detect UUID")
+	}
+
+	// The output contains a line of the form:
+	//   Label: none  uuid: <uuid>
+	// Extract the token following "uuid: ". Guard every index so malformed
+	// output yields an error instead of a slice-bounds panic (the previous
+	// code sliced with unchecked strings.Index results).
+	idx := strings.Index(output, "uuid: ")
+	if idx < 0 {
+		return "", fmt.Errorf("failed to detect UUID")
+	}
+
+	fields := strings.Fields(output[idx+len("uuid: "):])
+	if len(fields) == 0 {
+		return "", fmt.Errorf("failed to detect UUID")
+	}
+
+	return fields[0], nil
+}
+
+// btrfsSubvolumesDelete is the recursive variant on btrfsSubvolumeDelete,
+// it first deletes subvolumes of the subvolume and then the
+// subvolume itself.
+func btrfsSubvolumesDelete(subvol string) error {
+	// Find any nested subvolumes.
+	children, err := btrfsSubvolumesGet(subvol)
+	if err != nil {
+		return err
+	}
+
+	// Delete the deepest entries first so parents are cleared before their
+	// own deletion.
+	sort.Sort(sort.Reverse(sort.StringSlice(children)))
+	for _, child := range children {
+		if err := btrfsSubvolumeDelete(path.Join(subvol, child)); err != nil {
+			return err
+		}
+	}
+
+	// Finally delete the subvolume itself.
+	return btrfsSubvolumeDelete(subvol)
+}
+
+// btrfsSubvolumeDelete removes a single btrfs subvolume, cleaning up any
+// associated qgroup and read-only property on a best-effort basis first.
+func btrfsSubvolumeDelete(subvol string) error {
+	// Attempt (but don't fail on) to delete any qgroup on the subvolume.
+	if qgroup, err := btrfsSubvolumeQGroup(subvol); err == nil {
+		shared.RunCommand("btrfs", "qgroup", "destroy", qgroup, subvol)
+	}
+
+	// Attempt to make the subvolume writable.
+	shared.RunCommand("btrfs", "property", "set", subvol, "ro", "false")
+
+	// Delete the subvolume itself.
+	_, err := shared.RunCommand("btrfs", "subvolume", "delete", subvol)
+	return err
+}
+
+func btrfsSubvolumeQGroup(subvol string) (string, error) {
+ output, err := shared.RunCommand(
+ "btrfs",
+ "qgroup",
+ "show",
+ "-e",
+ "-f",
+ subvol)
+
+ if err != nil {
+ return "", errBtrfsNoQuota
+ }
+
+ var qgroup string
+ for _, line := range strings.Split(output, "\n") {
+ if line == "" || strings.HasPrefix(line, "qgroupid") || strings.HasPrefix(line, "---") {
+ continue
+ }
+
+ fields := strings.Fields(line)
+ if len(fields) != 4 {
+ continue
+ }
+
+ qgroup = fields[0]
+ }
+
+ if qgroup == "" {
+ return "", errBtrfsNoQGroup
+ }
+
+ return qgroup, nil
+}
+
+// isBtrfsFilesystem returns true when the btrfs tool recognizes path as a
+// btrfs filesystem.
+func isBtrfsFilesystem(path string) bool {
+	_, err := shared.RunCommand("btrfs", "filesystem", "show", path)
+	return err == nil
+}
+
+// btrfsPoolVolumeQGroupUsage returns the exclusive qgroup usage in bytes for
+// subvol, or an error when quotas are unsupported or no usage row is found.
+func btrfsPoolVolumeQGroupUsage(subvol string) (int64, error) {
+	output, err := shared.RunCommand("btrfs", "qgroup", "show", "-e", "-f", subvol)
+	if err != nil {
+		return -1, fmt.Errorf("BTRFS quotas not supported. Try enabling them with \"btrfs quota enable\"")
+	}
+
+	for _, line := range strings.Split(output, "\n") {
+		// Skip blank lines, the header row and the separator row.
+		if line == "" || strings.HasPrefix(line, "qgroupid") || strings.HasPrefix(line, "---") {
+			continue
+		}
+
+		fields := strings.Fields(line)
+		if len(fields) != 4 {
+			continue
+		}
+
+		// The third column holds the exclusive usage; return the first row
+		// that parses as an integer.
+		if usage, err := strconv.ParseInt(fields[2], 10, 64); err == nil {
+			return usage, nil
+		}
+	}
+
+	return -1, fmt.Errorf("Unable to find current qgroup usage")
+}
+
+func btrfsSubvolumesSnapshotCreate(source string, dest string, readonly bool, recursive bool, userns bool) error {
+ // Now snapshot all subvolumes of the root.
+ if recursive {
+ // Get a list of subvolumes of the root
+ subsubvols, err := btrfsSubvolumesGet(source)
+ if err != nil {
+ return err
+ }
+ sort.Sort(sort.StringSlice(subsubvols))
+
+ if len(subsubvols) > 0 && readonly {
+ // A root with subvolumes can never be readonly,
+ // also don't make subvolumes readonly.
+ readonly = false
+
+ logger.Warnf("Subvolumes detected, ignoring ro flag")
+ }
+
+ // First snapshot the root
+ err = btrfsSubvolumeSnapshotCreate(source, dest, readonly, userns)
+ if err != nil {
+ return err
+ }
+
+ for _, subsubvol := range subsubvols {
+ // Clear the target for the subvol to use
+ os.Remove(path.Join(dest, subsubvol))
+
+ err := btrfsSubvolumeSnapshotCreate(path.Join(source, subsubvol), path.Join(dest, subsubvol), readonly, userns)
+ if err != nil {
+ return err
+ }
+ }
+ } else {
+ err := btrfsSubvolumeSnapshotCreate(source, dest, readonly, userns)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func btrfsSubvolumeSnapshotCreate(source string, dest string, readonly bool, userns bool) error {
+ var output string
+ var err error
+ if readonly && !userns {
+ output, err = shared.RunCommand(
+ "btrfs",
+ "subvolume",
+ "snapshot",
+ "-r",
+ source,
+ dest)
+ } else {
+ output, err = shared.RunCommand(
+ "btrfs",
+ "subvolume",
+ "snapshot",
+ source,
+ dest)
+ }
+ if err != nil {
+ return fmt.Errorf(
+ "subvolume snapshot failed, source=%s, dest=%s, output=%s",
+ source,
+ dest,
+ output,
+ )
+ }
+
+ return err
+}
diff --git a/lxd/storage/drivers/load.go b/lxd/storage/drivers/load.go
index bced6925e7..34fe37c5d0 100644
--- a/lxd/storage/drivers/load.go
+++ b/lxd/storage/drivers/load.go
@@ -8,6 +8,7 @@ import (
var drivers = map[string]func() driver{
"dir": func() driver { return &dir{} },
"cephfs": func() driver { return &cephfs{} },
+ "btrfs": func() driver { return &btrfs{} },
}
// Load returns a Driver for an existing low-level storage pool.
More information about the lxc-devel
mailing list