[lxc-devel] [lxd/master] Rework backups
stgraber on Github
lxc-bot at linuxcontainers.org
Thu Aug 23 05:34:25 UTC 2018
From 4cbfa326463a21e4e988f3ca4b813f098675ec82 Mon Sep 17 00:00:00 2001
From: Stéphane Graber <stgraber at ubuntu.com>
Date: Wed, 22 Aug 2018 17:23:19 -0400
Subject: [PATCH 1/4] lxc/export: Don't crash on failure to delete backup
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Closes #4959
Signed-off-by: Stéphane Graber <stgraber at ubuntu.com>
---
lxc/export.go | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/lxc/export.go b/lxc/export.go
index 1a38c21309..6afc32c186 100644
--- a/lxc/export.go
+++ b/lxc/export.go
@@ -86,8 +86,10 @@ func (c *cmdExport) Run(cmd *cobra.Command, args []string) error {
defer func() {
// Delete backup after we're done
- op, _ = d.DeleteContainerBackup(name, backupName)
- op.Wait()
+ op, err = d.DeleteContainerBackup(name, backupName)
+ if err == nil {
+ op.Wait()
+ }
}()
var targetName string
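
For context, a minimal sketch (not part of the patch) of the failure mode being fixed, assuming "d" is the lxd.ContainerServer client that lxc/export.go already holds: when DeleteContainerBackup fails, the returned operation is presumably nil, so unconditionally calling Wait() in the deferred cleanup dereferenced a nil pointer.

    // Hypothetical standalone illustration of the guarded cleanup.
    func cleanupBackup(d lxd.ContainerServer, name string, backupName string) {
        op, err := d.DeleteContainerBackup(name, backupName)
        if err != nil {
            return // nothing to wait for; op is nil here and op.Wait() would panic
        }

        op.Wait()
    }
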
From 60f3ee96f3102e38266bb6bdc1e3f6546ab9aa71 Mon Sep 17 00:00:00 2001
From: Stéphane Graber <stgraber at ubuntu.com>
Date: Thu, 23 Aug 2018 01:07:57 -0400
Subject: [PATCH 2/4] shared: Return decompressor in DetectCompression
---
lxd/images.go | 6 +++---
shared/archive_linux.go | 32 ++++++++++++++++++--------------
2 files changed, 21 insertions(+), 17 deletions(-)
diff --git a/lxd/images.go b/lxd/images.go
index 749cde9f58..57a83a2fb2 100644
--- a/lxd/images.go
+++ b/lxd/images.go
@@ -737,7 +737,7 @@ func imagesPost(d *Daemon, r *http.Request) Response {
func getImageMetadata(fname string) (*api.ImageMetadata, error) {
metadataName := "metadata.yaml"
- compressionArgs, _, err := shared.DetectCompression(fname)
+ compressionArgs, _, _, err := shared.DetectCompression(fname)
if err != nil {
return nil, fmt.Errorf(
@@ -1690,7 +1690,7 @@ func imageExport(d *Daemon, r *http.Request) Response {
imagePath := shared.VarPath("images", imgInfo.Fingerprint)
rootfsPath := imagePath + ".rootfs"
- _, ext, err := shared.DetectCompression(imagePath)
+ _, ext, _, err := shared.DetectCompression(imagePath)
if err != nil {
ext = ""
}
@@ -1705,7 +1705,7 @@ func imageExport(d *Daemon, r *http.Request) Response {
// Recompute the extension for the root filesystem, it may use a different
// compression algorithm than the metadata.
- _, ext, err = shared.DetectCompression(rootfsPath)
+ _, ext, _, err = shared.DetectCompression(rootfsPath)
if err != nil {
ext = ""
}
diff --git a/shared/archive_linux.go b/shared/archive_linux.go
index bc3ebe00af..ca359763cf 100644
--- a/shared/archive_linux.go
+++ b/shared/archive_linux.go
@@ -3,6 +3,7 @@ package shared
import (
"bytes"
"fmt"
+ "io"
"os"
"strings"
"syscall"
@@ -10,13 +11,17 @@ import (
"github.com/lxc/lxd/shared/logger"
)
-func DetectCompression(fname string) ([]string, string, error) {
+func DetectCompression(fname string) ([]string, string, []string, error) {
f, err := os.Open(fname)
if err != nil {
- return []string{""}, "", err
+ return nil, "", nil, err
}
defer f.Close()
+ return DetectCompressionFile(f)
+}
+
+func DetectCompressionFile(f io.ReadSeeker) ([]string, string, []string, error) {
// read header parts to detect compression method
// bz2 - 2 bytes, 'BZ' signature/magic number
// gz - 2 bytes, 0x1f 0x8b
@@ -24,34 +29,33 @@ func DetectCompression(fname string) ([]string, string, error) {
// xz - 6 bytes, header format { 0xFD, '7', 'z', 'X', 'Z', 0x00 }
// tar - 263 bytes, trying to get ustar from 257 - 262
header := make([]byte, 263)
- _, err = f.Read(header)
+ _, err := f.Read(header)
if err != nil {
- return []string{""}, "", err
+ return nil, "", nil, err
}
switch {
case bytes.Equal(header[0:2], []byte{'B', 'Z'}):
- return []string{"-jxf"}, ".tar.bz2", nil
+ return []string{"-jxf"}, ".tar.bz2", []string{"bzip2", "-d"}, nil
case bytes.Equal(header[0:2], []byte{0x1f, 0x8b}):
- return []string{"-zxf"}, ".tar.gz", nil
+ return []string{"-zxf"}, ".tar.gz", []string{"gzip", "-d"}, nil
case (bytes.Equal(header[1:5], []byte{'7', 'z', 'X', 'Z'}) && header[0] == 0xFD):
- return []string{"-Jxf"}, ".tar.xz", nil
+ return []string{"-Jxf"}, ".tar.xz", []string{"xz", "-d"}, nil
case (bytes.Equal(header[1:5], []byte{'7', 'z', 'X', 'Z'}) && header[0] != 0xFD):
- return []string{"--lzma", "-xf"}, ".tar.lzma", nil
+ return []string{"--lzma", "-xf"}, ".tar.lzma", []string{"lzma", "-d"}, nil
case bytes.Equal(header[0:3], []byte{0x5d, 0x00, 0x00}):
- return []string{"--lzma", "-xf"}, ".tar.lzma", nil
+ return []string{"--lzma", "-xf"}, ".tar.lzma", []string{"lzma", "-d"}, nil
case bytes.Equal(header[257:262], []byte{'u', 's', 't', 'a', 'r'}):
- return []string{"-xf"}, ".tar", nil
+ return []string{"-xf"}, ".tar", []string{""}, nil
case bytes.Equal(header[0:4], []byte{'h', 's', 'q', 's'}):
- return []string{""}, ".squashfs", nil
+ return []string{""}, ".squashfs", nil, nil
default:
- return []string{""}, "", fmt.Errorf("Unsupported compression")
+ return nil, "", nil, fmt.Errorf("Unsupported compression")
}
-
}
func Unpack(file string, path string, blockBackend bool, runningInUserns bool) error {
- extractArgs, extension, err := DetectCompression(file)
+ extractArgs, extension, _, err := DetectCompression(file)
if err != nil {
return err
}
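
A rough sketch of how a caller might use the new third return value (the decompressor command) to stream-decompress a file in memory. decompressToBuffer is a hypothetical helper, not something this patch adds; it only relies on the shared package from this tree and on the values DetectCompression now returns (for example []string{"xz", "-d"} for a .tar.xz, or an empty command for plain tar).

    package sketch

    import (
        "bytes"
        "os"

        "github.com/lxc/lxd/shared"
    )

    // decompressToBuffer pipes a compressed tarball through the decompressor
    // reported by DetectCompression, e.g. "xz -d" for a .tar.xz file.
    func decompressToBuffer(fname string) (*bytes.Buffer, error) {
        _, _, decompressor, err := shared.DetectCompression(fname)
        if err != nil {
            return nil, err
        }

        f, err := os.Open(fname)
        if err != nil {
            return nil, err
        }
        defer f.Close()

        buf := &bytes.Buffer{}
        if len(decompressor) == 0 || decompressor[0] == "" {
            // Plain tar (or squashfs): nothing to decompress, read as-is
            _, err := buf.ReadFrom(f)
            return buf, err
        }

        // Pipe the file through the detected decompressor command
        err = shared.RunCommandWithFds(f, buf, decompressor[0], decompressor[1:]...)
        if err != nil {
            return nil, err
        }

        return buf, nil
    }
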
From 9e64d5f0a82ab2c17e9a6412e26cc7fee329c87f Mon Sep 17 00:00:00 2001
From: Stéphane Graber <stgraber at ubuntu.com>
Date: Thu, 23 Aug 2018 01:09:26 -0400
Subject: [PATCH 3/4] lxd/containers: Don't return nil on Storage calls
---
lxd/container_lxc.go | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/lxd/container_lxc.go b/lxd/container_lxc.go
index 5343362961..3aa17a3036 100644
--- a/lxd/container_lxc.go
+++ b/lxd/container_lxc.go
@@ -6194,6 +6194,10 @@ func (c *containerLXC) tarStoreFile(linkmap map[uint64]string, offset int, tw *t
// Storage functions
func (c *containerLXC) Storage() storage {
+ if c.storage == nil {
+ c.initStorage()
+ }
+
return c.storage
}
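
A stand-alone illustration of the same lazy-initialization guard, using generic names rather than LXD's actual types. The point is that callers (such as the backup code in the next patch) invoke methods on the result of Storage() directly, so returning nil would turn into an immediate panic.

    package main

    import "fmt"

    type store struct{ pool string }

    type ctr struct {
        storage *store
    }

    func (c *ctr) initStorage() {
        c.storage = &store{pool: "default"}
    }

    // Storage mirrors the patched accessor: initialize on first use instead of
    // handing callers a nil value they will immediately dereference.
    func (c *ctr) Storage() *store {
        if c.storage == nil {
            c.initStorage()
        }

        return c.storage
    }

    func main() {
        c := &ctr{}                   // storage never explicitly initialized
        fmt.Println(c.Storage().pool) // safe: prints "default" instead of panicking
    }
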
From fe8a55a7b2de6dad9044bad60b4e5d097c5c9a1e Mon Sep 17 00:00:00 2001
From: Stéphane Graber <stgraber at ubuntu.com>
Date: Thu, 23 Aug 2018 01:32:26 -0400
Subject: [PATCH 4/4] lxd/backups: Rework to behave as intended
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
This reworks the backup code to have it generate tarballs at creation
time rather than at access time.
It also removes a lot of duplicate logic in the per-backend code and
adds a patch to convert existing backups.
Closes #4959
Signed-off-by: Stéphane Graber <stgraber at ubuntu.com>
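
For orientation before the diff below, here is a condensed sketch of the flow every storage backend now follows in ContainerBackupCreate. The function name is made up, but the helpers (shared.VarPath, backupCreateTarball) and the staging layout match the implementation in the diff; it would live in the lxd package.

    // Sketch only: the per-driver ContainerBackupCreate implementations stage
    // their data in a temporary directory, then hand off to the shared packer.
    func containerBackupCreateSketch(b backup, source container) error {
        // 1. Stage everything in a temporary directory under ${LXD_DIR}/backups
        tmpPath, err := ioutil.TempDir(shared.VarPath("backups"), "lxd_backup_")
        if err != nil {
            return err
        }
        defer os.RemoveAll(tmpPath)

        // 2. Backend-specific dump into tmpPath: a "container" directory plus
        //    one "snapshots/<name>" entry per snapshot (rsync, btrfs send or
        //    ceph export, depending on the driver and backup.optimizedStorage)

        // 3. Common packing: write index.yaml, tar the staging directory with
        //    --transform 's,^./,backup/,', compress it with xz and store it as
        //    ${LXD_DIR}/backups/<container>/<backup> with mode 0600
        return backupCreateTarball(tmpPath, b)
    }
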
---
lxd/backup.go | 240 ++++++++++++++++++++++-----------
lxd/container.go | 58 +-------
lxd/container_backup.go | 31 ++---
lxd/container_lxc.go | 33 ++++-
lxd/containers_post.go | 2 +-
lxd/patches.go | 100 ++++++++++++++
lxd/response.go | 20 ---
lxd/storage.go | 8 --
lxd/storage_btrfs.go | 220 ++++++++++--------------------
lxd/storage_ceph.go | 104 ++-------------
lxd/storage_ceph_utils.go | 54 ++++----
lxd/storage_dir.go | 220 ++++++------------------------
lxd/storage_lvm.go | 251 ++++++-----------------------------
lxd/storage_lvm_utils.go | 32 -----
lxd/storage_mock.go | 12 --
lxd/storage_volumes_utils.go | 1 -
lxd/storage_zfs.go | 196 +++++++++------------------
lxd/sys/fs.go | 7 +-
18 files changed, 565 insertions(+), 1024 deletions(-)
diff --git a/lxd/backup.go b/lxd/backup.go
index 2beaba0485..d17b256a64 100644
--- a/lxd/backup.go
+++ b/lxd/backup.go
@@ -18,7 +18,62 @@ import (
"github.com/lxc/lxd/shared/api"
)
-// backup represents a container backup.
+// Load a backup from the database
+func backupLoadByName(s *state.State, name string) (*backup, error) {
+ // Get the backup database record
+ args, err := s.Cluster.ContainerGetBackup(name)
+ if err != nil {
+ return nil, err
+ }
+
+ // Load the container it belongs to
+ c, err := containerLoadById(s, args.ContainerID)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return the backup struct
+ return &backup{
+ state: s,
+ container: c,
+ id: args.ID,
+ name: name,
+ creationDate: args.CreationDate,
+ expiryDate: args.ExpiryDate,
+ containerOnly: args.ContainerOnly,
+ optimizedStorage: args.OptimizedStorage,
+ }, nil
+}
+
+// Create a new backup
+func backupCreate(s *state.State, args db.ContainerBackupArgs, sourceContainer container) error {
+ // Create the database entry
+ err := s.Cluster.ContainerBackupCreate(args)
+ if err != nil {
+ if err == db.ErrAlreadyDefined {
+ return fmt.Errorf("backup '%s' already exists", args.Name)
+ }
+
+ return err
+ }
+
+ // Get the backup struct
+ b, err := backupLoadByName(s, args.Name)
+ if err != nil {
+ return err
+ }
+
+ // Now create the empty snapshot
+ err = sourceContainer.Storage().ContainerBackupCreate(*b, sourceContainer)
+ if err != nil {
+ s.Cluster.ContainerBackupRemove(args.Name)
+ return err
+ }
+
+ return nil
+}
+
+// backup represents a container backup
type backup struct {
state *state.State
container container
@@ -41,24 +96,37 @@ type backupInfo struct {
HasBinaryFormat bool `json:"-" yaml:"-"`
}
-// Rename renames a container backup.
+// Rename renames a container backup
func (b *backup) Rename(newName string) error {
- ourStart, err := b.container.StorageStart()
- if err != nil {
- return err
- }
- if ourStart {
- defer b.container.StorageStop()
+ oldBackupPath := shared.VarPath("backups", b.name)
+ newBackupPath := shared.VarPath("backups", newName)
+
+ // Create the new backup path
+ backupsPath := shared.VarPath("backups", b.container.Name())
+ if !shared.PathExists(backupsPath) {
+ err := os.MkdirAll(backupsPath, 0700)
+ if err != nil {
+ return err
+ }
}
- // Rename the directories and files
- err = b.container.Storage().ContainerBackupRename(*b, newName)
+ // Rename the backup directory
+ err := os.Rename(oldBackupPath, newBackupPath)
if err != nil {
return err
}
- // Rename the database entry
- err = b.state.Cluster.ContainerBackupRename(b.Name(), newName)
+ // Check if we can remove the container directory
+ empty, _ := shared.PathIsEmpty(backupsPath)
+ if empty {
+ err := os.Remove(backupsPath)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Rename the database record
+ err = b.state.Cluster.ContainerBackupRename(b.name, newName)
if err != nil {
return err
}
@@ -66,24 +134,30 @@ func (b *backup) Rename(newName string) error {
return nil
}
-// Delete removes a container backup.
+// Delete removes a container backup
func (b *backup) Delete() error {
- ourStart, err := b.container.StorageStart()
- if err != nil {
- return err
- }
- if ourStart {
- defer b.container.StorageStop()
+ backupPath := shared.VarPath("backups", b.name)
+
+ // Delete the on-disk data
+ if shared.PathExists(backupPath) {
+ err := os.RemoveAll(backupPath)
+ if err != nil {
+ return err
+ }
}
- // Delete backup from storage
- err = b.container.Storage().ContainerBackupDelete(b.Name())
- if err != nil {
- return err
+ // Check if we can remove the container directory
+ backupsPath := shared.VarPath("backups", b.container.Name())
+ empty, _ := shared.PathIsEmpty(backupsPath)
+ if empty {
+ err := os.Remove(backupsPath)
+ if err != nil {
+ return err
+ }
}
// Remove the database record
- err = b.state.Cluster.ContainerBackupRemove(b.Name())
+ err := b.state.Cluster.ContainerBackupRemove(b.name)
if err != nil {
return err
}
@@ -91,24 +165,6 @@ func (b *backup) Delete() error {
return nil
}
-// Dump dumps the container including its snapshots.
-func (b *backup) Dump() ([]byte, error) {
- ourStart, err := b.container.StorageStart()
- if err != nil {
- return nil, err
- }
- if ourStart {
- defer b.container.StorageStop()
- }
-
- data, err := b.container.Storage().ContainerBackupDump(*b)
- if err != nil {
- return nil, err
- }
-
- return data, nil
-}
-
func (b *backup) Render() *api.ContainerBackup {
return &api.ContainerBackup{
Name: strings.SplitN(b.name, "/", 2)[1],
@@ -119,41 +175,22 @@ func (b *backup) Render() *api.ContainerBackup {
}
}
-func (b *backup) Id() int {
- return b.id
-}
-
-func (b *backup) Name() string {
- return b.name
-}
-
-func (b *backup) CreationDate() time.Time {
- return b.creationDate
-}
-
-func (b *backup) ExpiryDate() time.Time {
- return b.expiryDate
-}
-
-func (b *backup) ContainerOnly() bool {
- return b.containerOnly
-}
+func backupGetInfo(r io.ReadSeeker) (*backupInfo, error) {
+ var buf bytes.Buffer
+ var tr *tar.Reader
+ result := backupInfo{}
+ hasBinaryFormat := false
+ hasIndexFile := false
-func (b *backup) OptimizedStorage() bool {
- return b.optimizedStorage
-}
+ // Extract
+ r.Seek(0, 0)
-func getBackupInfo(r io.Reader) (*backupInfo, error) {
- var buf bytes.Buffer
- err := shared.RunCommandWithFds(r, &buf, "unxz", "-")
+ err := shared.RunCommandWithFds(r, &buf, "xz", "-d")
if err != nil {
return nil, err
}
- result := backupInfo{}
- hasBinaryFormat := false
- hasIndexFile := false
- tr := tar.NewReader(&buf)
+ tr = tar.NewReader(&buf)
for {
hdr, err := tr.Next()
if err == io.EOF {
@@ -188,7 +225,7 @@ func getBackupInfo(r io.Reader) (*backupInfo, error) {
// fixBackupStoragePool changes the pool information in the backup.yaml. This
// is done only if the provided pool doesn't exist. In this case, the pool of
// the default profile will be used.
-func fixBackupStoragePool(c *db.Cluster, b backupInfo) error {
+func backupFixStoragePool(c *db.Cluster, b backupInfo) error {
// Get the default profile
_, profile, err := c.ProfileGet("default")
if err != nil {
@@ -251,17 +288,14 @@ func fixBackupStoragePool(c *db.Cluster, b backupInfo) error {
return nil
}
-func createBackupIndexFile(container container, backup backup) error {
- pool, err := container.StoragePool()
- if err != nil {
- return err
- }
+func backupCreateTarball(path string, backup backup) error {
+ container := backup.container
- file, err := os.Create(filepath.Join(getBackupMountPoint(pool, backup.Name()), "index.yaml"))
+ // Create the index
+ pool, err := container.StoragePool()
if err != nil {
return err
}
- defer file.Close()
indexFile := backupInfo{
Name: container.Name(),
@@ -271,11 +305,12 @@ func createBackupIndexFile(container container, backup backup) error {
Snapshots: []string{},
}
- if !backup.ContainerOnly() {
+ if !backup.containerOnly {
snaps, err := container.Snapshots()
if err != nil {
return err
}
+
for _, snap := range snaps {
_, snapName, _ := containerGetParentAndSnapshotName(snap.Name())
indexFile.Snapshots = append(indexFile.Snapshots, snapName)
@@ -287,7 +322,52 @@ func createBackupIndexFile(container container, backup backup) error {
return err
}
+ file, err := os.Create(filepath.Join(path, "index.yaml"))
+ if err != nil {
+ return err
+ }
+
_, err = file.Write(data)
+ file.Close()
+ if err != nil {
+ return err
+ }
+
+ // Create the target path if needed
+ backupsPath := shared.VarPath("backups", backup.container.Name())
+ if !shared.PathExists(backupsPath) {
+ err := os.MkdirAll(backupsPath, 0700)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Create the tarball
+ backupPath := shared.VarPath("backups", backup.name)
+ args := []string{"-cf", backupPath, "--xattrs", "-C", path, "--transform", "s,^./,backup/,", "."}
+ _, err = shared.RunCommand("tar", args...)
+ if err != nil {
+ return err
+ }
+
+ // Compress it
+ compressedPath, err := compressFile(backupPath, "xz")
+ if err != nil {
+ return err
+ }
+
+ err = os.Remove(backupPath)
+ if err != nil {
+ return err
+ }
+
+ err = os.Rename(compressedPath, backupPath)
+ if err != nil {
+ return err
+ }
+
+ // Set permissions
+ err = os.Chmod(backupPath, 0600)
if err != nil {
return err
}
diff --git a/lxd/container.go b/lxd/container.go
index 8ac7374b1a..40956b360c 100644
--- a/lxd/container.go
+++ b/lxd/container.go
@@ -716,7 +716,7 @@ func containerCreateFromBackup(s *state.State, info backupInfo, data io.ReadSeek
if fixBackupFile {
// Use the default pool since the pool provided in the backup.yaml
// doesn't exist.
- err = fixBackupStoragePool(s.Cluster, info)
+ err = backupFixStoragePool(s.Cluster, info)
if err != nil {
return err
}
@@ -1252,59 +1252,3 @@ func containerLoadAllInternal(cts []db.ContainerArgs, s *state.State) ([]contain
return containers, nil
}
-
-func containerBackupLoadByName(s *state.State, name string) (*backup, error) {
- // Get the DB record
- args, err := s.Cluster.ContainerGetBackup(name)
- if err != nil {
- return nil, err
- }
-
- c, err := containerLoadById(s, args.ContainerID)
- if err != nil {
- return nil, err
- }
-
- return &backup{
- state: s,
- container: c,
- id: args.ID,
- name: name,
- creationDate: args.CreationDate,
- expiryDate: args.ExpiryDate,
- containerOnly: args.ContainerOnly,
- optimizedStorage: args.OptimizedStorage,
- }, nil
-}
-
-func containerBackupCreate(s *state.State, args db.ContainerBackupArgs,
- sourceContainer container) error {
- err := s.Cluster.ContainerBackupCreate(args)
- if err != nil {
- if err == db.ErrAlreadyDefined {
- return fmt.Errorf("backup '%s' already exists", args.Name)
- }
- return err
- }
-
- b, err := containerBackupLoadByName(s, args.Name)
- if err != nil {
- return err
- }
-
- // Now create the empty snapshot
- err = sourceContainer.Storage().ContainerBackupCreate(*b, sourceContainer)
- if err != nil {
- s.Cluster.ContainerBackupRemove(args.Name)
- return err
- }
-
- // Create index.yaml containing information regarding the backup
- err = createBackupIndexFile(sourceContainer, *b)
- if err != nil {
- s.Cluster.ContainerBackupRemove(args.Name)
- return err
- }
-
- return nil
-}
diff --git a/lxd/container_backup.go b/lxd/container_backup.go
index 5eb6124080..9a04002cb2 100644
--- a/lxd/container_backup.go
+++ b/lxd/container_backup.go
@@ -46,7 +46,7 @@ func containerBackupsGet(d *Daemon, r *http.Request) Response {
for _, backup := range backups {
if !recursion {
url := fmt.Sprintf("/%s/containers/%s/backups/%s",
- version.APIVersion, cname, strings.Split(backup.Name(), "/")[1])
+ version.APIVersion, cname, strings.Split(backup.name, "/")[1])
resultString = append(resultString, url)
} else {
render := backup.Render()
@@ -78,14 +78,6 @@ func containerBackupsPost(d *Daemon, r *http.Request) Response {
return SmartError(err)
}
- ourStart, err := c.StorageStart()
- if err != nil {
- return InternalError(err)
- }
- if ourStart {
- defer c.StorageStop()
- }
-
req := api.ContainerBackupsPost{}
err = json.NewDecoder(r.Body).Decode(&req)
if err != nil {
@@ -105,11 +97,11 @@ func containerBackupsPost(d *Daemon, r *http.Request) Response {
for _, backup := range backups {
// Ignore backups not containing base
- if !strings.HasPrefix(backup.Name(), base) {
+ if !strings.HasPrefix(backup.name, base) {
continue
}
- substr := backup.Name()[length:]
+ substr := backup.name[length:]
var num int
count, err := fmt.Sscanf(substr, "%d", &num)
if err != nil || count != 1 {
@@ -140,7 +132,7 @@ func containerBackupsPost(d *Daemon, r *http.Request) Response {
OptimizedStorage: req.OptimizedStorage,
}
- err := containerBackupCreate(d.State(), args, c)
+ err := backupCreate(d.State(), args, c)
if err != nil {
return err
}
@@ -175,7 +167,7 @@ func containerBackupGet(d *Daemon, r *http.Request) Response {
}
fullName := name + shared.SnapshotDelimiter + backupName
- backup, err := containerBackupLoadByName(d.State(), fullName)
+ backup, err := backupLoadByName(d.State(), fullName)
if err != nil {
return SmartError(err)
}
@@ -208,7 +200,7 @@ func containerBackupPost(d *Daemon, r *http.Request) Response {
}
oldName := name + shared.SnapshotDelimiter + backupName
- backup, err := containerBackupLoadByName(d.State(), oldName)
+ backup, err := backupLoadByName(d.State(), oldName)
if err != nil {
SmartError(err)
}
@@ -250,7 +242,7 @@ func containerBackupDelete(d *Daemon, r *http.Request) Response {
}
fullName := name + shared.SnapshotDelimiter + backupName
- backup, err := containerBackupLoadByName(d.State(), fullName)
+ backup, err := backupLoadByName(d.State(), fullName)
if err != nil {
return SmartError(err)
}
@@ -290,15 +282,14 @@ func containerBackupExportGet(d *Daemon, r *http.Request) Response {
}
fullName := name + shared.SnapshotDelimiter + backupName
- backup, err := containerBackupLoadByName(d.State(), fullName)
+ backup, err := backupLoadByName(d.State(), fullName)
if err != nil {
return SmartError(err)
}
- data, err := backup.Dump()
- if err != nil {
- return SmartError(err)
+ ent := fileResponseEntry{
+ path: shared.VarPath("backups", backup.name),
}
- return BackupResponse(data)
+ return FileResponse(r, []fileResponseEntry{ent}, nil, false)
}
diff --git a/lxd/container_lxc.go b/lxd/container_lxc.go
index 3aa17a3036..0beed0e234 100644
--- a/lxd/container_lxc.go
+++ b/lxd/container_lxc.go
@@ -3178,7 +3178,7 @@ func (c *containerLXC) Backups() ([]backup, error) {
// Build the backup list
backups := []backup{}
for _, backupName := range backupNames {
- backup, err := containerBackupLoadByName(c.state, backupName)
+ backup, err := backupLoadByName(c.state, backupName)
if err != nil {
return nil, err
}
@@ -3386,13 +3386,26 @@ func (c *containerLXC) Delete() error {
}
}
} else {
- // Remove all snapshot
+ // Remove all snapshots
err := containerDeleteSnapshots(c.state, c.Name())
if err != nil {
logger.Warn("Failed to delete snapshots", log.Ctx{"name": c.Name(), "err": err})
return err
}
+ // Remove all backups
+ backups, err := c.Backups()
+ if err != nil {
+ return err
+ }
+
+ for _, backup := range backups {
+ err = backup.Delete()
+ if err != nil {
+ return err
+ }
+ }
+
// Clean things up
c.cleanup()
@@ -3529,6 +3542,22 @@ func (c *containerLXC) Rename(newName string) error {
}
}
+ // Rename the backups
+ backups, err := c.Backups()
+ if err != nil {
+ return err
+ }
+
+ for _, backup := range backups {
+ backupName := strings.Split(backup.name, "/")[1]
+ newName := fmt.Sprintf("%s/%s", newName, backupName)
+
+ err = backup.Rename(newName)
+ if err != nil {
+ return err
+ }
+ }
+
// Rename the database entry
err = c.state.Cluster.ContainerRename(oldName, newName)
if err != nil {
diff --git a/lxd/containers_post.go b/lxd/containers_post.go
index 22f57d3a26..d142981c61 100644
--- a/lxd/containers_post.go
+++ b/lxd/containers_post.go
@@ -552,7 +552,7 @@ func createFromBackup(d *Daemon, data io.Reader) Response {
// Parse the backup information
f.Seek(0, 0)
- bInfo, err := getBackupInfo(f)
+ bInfo, err := backupGetInfo(f)
if err != nil {
return BadRequest(err)
}
diff --git a/lxd/patches.go b/lxd/patches.go
index 874fe2e567..8dc8cb7d75 100644
--- a/lxd/patches.go
+++ b/lxd/patches.go
@@ -59,6 +59,7 @@ var patches = []patch{
{name: "container_config_regen", run: patchContainerConfigRegen},
{name: "lvm_node_specific_config_keys", run: patchLvmNodeSpecificConfigKeys},
{name: "candid_rename_config_key", run: patchCandidConfigKey},
+ {name: "move_backups", run: patchMoveBackups},
}
type patch struct {
@@ -2961,6 +2962,105 @@ func patchCandidConfigKey(name string, d *Daemon) error {
})
}
+func patchMoveBackups(name string, d *Daemon) error {
+ // Get all storage pools
+ pools, err := d.cluster.StoragePools()
+ if err != nil {
+ return err
+ }
+
+ // Get all containers
+ containers, err := d.cluster.ContainersNodeList(db.CTypeRegular)
+ if err != nil {
+ return err
+ }
+
+ // Convert the backups
+ for _, pool := range pools {
+ poolBackupPath := shared.VarPath("storage-pools", pool, "backups")
+
+ // Check if we have any backup
+ if !shared.PathExists(poolBackupPath) {
+ continue
+ }
+
+ // Look at the list of backups
+ cts, err := ioutil.ReadDir(poolBackupPath)
+ if err != nil {
+ return err
+ }
+
+ for _, ct := range cts {
+ if !shared.StringInSlice(ct.Name(), containers) {
+ // Backups for a deleted container, remove it
+ err = os.RemoveAll(filepath.Join(poolBackupPath, ct.Name()))
+ if err != nil {
+ return err
+ }
+
+ continue
+ }
+
+ backups, err := ioutil.ReadDir(filepath.Join(poolBackupPath, ct.Name()))
+ if err != nil {
+ return err
+ }
+
+ if len(backups) > 0 {
+ // Create the target path if needed
+ backupsPath := shared.VarPath("backups", ct.Name())
+ if !shared.PathExists(backupsPath) {
+ err := os.MkdirAll(backupsPath, 0700)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ for _, backup := range backups {
+ // Create the tarball
+ backupPath := shared.VarPath("backups", ct.Name(), backup.Name())
+ path := filepath.Join(poolBackupPath, ct.Name(), backup.Name())
+ args := []string{"-cf", backupPath, "--xattrs", "-C", path, "--transform", "s,^./,backup/,", "."}
+ _, err = shared.RunCommand("tar", args...)
+ if err != nil {
+ return err
+ }
+
+ // Compress it
+ compressedPath, err := compressFile(backupPath, "xz")
+ if err != nil {
+ return err
+ }
+
+ err = os.Remove(backupPath)
+ if err != nil {
+ return err
+ }
+
+ err = os.Rename(compressedPath, backupPath)
+ if err != nil {
+ return err
+ }
+
+ // Set permissions
+ err = os.Chmod(backupPath, 0600)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ // Wipe the backup directory
+ err = os.RemoveAll(poolBackupPath)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
// Patches end here
// Here are a couple of legacy patches that were originally in
diff --git a/lxd/response.go b/lxd/response.go
index 256341759e..b33394383c 100644
--- a/lxd/response.go
+++ b/lxd/response.go
@@ -27,26 +27,6 @@ type Response interface {
String() string
}
-// Backup response
-type backupResponse struct {
- data []byte
-}
-
-func (r *backupResponse) Render(w http.ResponseWriter) error {
- w.Header().Set("Content-Type", "application/octet-stream")
- w.Header().Set("Content-Length", fmt.Sprintf("%d", len(r.data)))
- _, err := io.Copy(w, bytes.NewReader(r.data))
- return err
-}
-
-func (r *backupResponse) String() string {
- return fmt.Sprintf("%d bytes", len(r.data))
-}
-
-func BackupResponse(data []byte) Response {
- return &backupResponse{data: data}
-}
-
// Sync response
type syncResponse struct {
success bool
diff --git a/lxd/storage.go b/lxd/storage.go
index 8982f63d7b..6d6b98baac 100644
--- a/lxd/storage.go
+++ b/lxd/storage.go
@@ -188,9 +188,6 @@ type storage interface {
ContainerSnapshotStop(c container) (bool, error)
ContainerBackupCreate(backup backup, sourceContainer container) error
- ContainerBackupDelete(name string) error
- ContainerBackupRename(backup backup, newName string) error
- ContainerBackupDump(backup backup) ([]byte, error)
ContainerBackupLoad(info backupInfo, data io.ReadSeeker) error
// For use in migrating snapshots.
@@ -598,11 +595,6 @@ func getStoragePoolVolumeMountPoint(poolName string, volumeName string) string {
return shared.VarPath("storage-pools", poolName, "custom", volumeName)
}
-// ${LXD_DIR}/storage-pools/<pool>/backups/<backup_name>
-func getBackupMountPoint(poolName string, backupName string) string {
- return shared.VarPath("storage-pools", poolName, "backups", backupName)
-}
-
func createContainerMountpoint(mountPoint string, mountPointSymlink string, privileged bool) error {
var mode os.FileMode
if privileged {
diff --git a/lxd/storage_btrfs.go b/lxd/storage_btrfs.go
index a78ce9616e..a39b4cb21f 100644
--- a/lxd/storage_btrfs.go
+++ b/lxd/storage_btrfs.go
@@ -1,7 +1,6 @@
package main
import (
- "bytes"
"fmt"
"io"
"io/ioutil"
@@ -896,16 +895,6 @@ func (s *storageBtrfs) ContainerDelete(container container) error {
}
}
- backups, err := container.Backups()
- if err != nil {
- return err
- }
-
- for _, backup := range backups {
- backupName := strings.Split(backup.Name(), "/")[1]
- s.ContainerBackupDelete(backupName)
- }
-
logger.Debugf("Deleted BTRFS storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
return nil
}
@@ -1175,17 +1164,6 @@ func (s *storageBtrfs) ContainerRename(container container, newName string) erro
}
}
- backups, err := container.Backups()
- if err != nil {
- return err
- }
-
- for _, backup := range backups {
- backupName := strings.Split(backup.Name(), "/")[1]
- newName := fmt.Sprintf("%s/%s", newName, backupName)
- s.ContainerBackupRename(backup, newName)
- }
-
logger.Debugf("Renamed BTRFS storage volume for container \"%s\" from %s to %s", s.volume.Name, s.volume.Name, newName)
return nil
}
@@ -1528,63 +1506,49 @@ func (s *storageBtrfs) doBtrfsBackup(cur string, prev string, target string) err
return err
}
-func (s *storageBtrfs) doContainerBackupCreateOptimized(backup backup, source container) error {
- // /var/lib/lxd/storage-pools/<pool>/backups
- baseMntPoint := getBackupMountPoint(s.pool.Name, backup.Name())
-
- // /var/lib/lxd/storage-pools/<pool>/backups/<container>/container
- err := os.MkdirAll(baseMntPoint, 0711)
- if err != nil {
- logger.Errorf("Failed to create directory \"%s\": %s", baseMntPoint, err)
- return err
- }
-
- // retrieve snapshots
- snapshots, err := source.Snapshots()
- if err != nil {
- return err
- }
-
+func (s *storageBtrfs) doContainerBackupCreateOptimized(tmpPath string, backup backup, source container) error {
+ // Handle snapshots
finalParent := ""
- if !backup.containerOnly && len(snapshots) > 0 {
- // /var/lib/lxd/storage-pools/<pool>/backups/<container>/snapshots
- targetBackupSnapshotsMntPoint := fmt.Sprintf("%s/snapshots", baseMntPoint)
- err = os.MkdirAll(targetBackupSnapshotsMntPoint, 0711)
+ if !backup.containerOnly {
+ snapshotsPath := fmt.Sprintf("%s/snapshots", tmpPath)
+
+ // Retrieve the snapshots
+ snapshots, err := source.Snapshots()
if err != nil {
- logger.Errorf("Failed to create directory \"%s\": %s", targetBackupSnapshotsMntPoint, err)
return err
}
- logger.Debugf("Created directory \"%s\"", targetBackupSnapshotsMntPoint)
+
+ // Create the snapshot path
+ if len(snapshots) > 0 {
+ err = os.MkdirAll(snapshotsPath, 0711)
+ if err != nil {
+ return err
+ }
+ }
for i, snap := range snapshots {
+ _, snapName, _ := containerGetParentAndSnapshotName(snap.Name())
+
+ // Figure out previous and current subvolumes
prev := ""
if i > 0 {
- // /var/lib/lxd/storage-pools/<pool>/snapshots/<container>/<snapshot>
prev = getSnapshotMountPoint(s.pool.Name, snapshots[i-1].Name())
}
-
- // /var/lib/lxd/storage-pools/<pool>/snapshots/<container>/<snapshot>
cur := getSnapshotMountPoint(s.pool.Name, snap.Name())
- _, snapOnlyName, _ := containerGetParentAndSnapshotName(snap.Name())
-
- // /var/lib/lxd/storage-pools/<pool>/backups/<container>/snapshots/<snapshot>
- target := fmt.Sprintf("%s/%s.bin", targetBackupSnapshotsMntPoint, snapOnlyName)
+ // Make a binary btrfs backup
+ target := fmt.Sprintf("%s/%s.bin", snapshotsPath, snapName)
err := s.doBtrfsBackup(cur, prev, target)
if err != nil {
- logger.Errorf("Failed to btrfs send snapshot \"%s\" -p \"%s\" to \"%s\": %s", cur, prev, target, err)
return err
}
- logger.Debugf("Performed btrfs send snapshot \"%s\" -p \"%s\" to \"%s\"", cur, prev, target)
- // /var/lib/lxd/storage-pools/<pool>/snapshots/<container>/<snapshot>
finalParent = cur
}
}
- // /var/lib/lxd/storage-pools/<pool>/containers/<container>
+ // Make a temporary copy of the container
sourceVolume := getContainerMountPoint(s.pool.Name, source.Name())
-
containersPath := getContainerMountPoint(s.pool.Name, "")
tmpContainerMntPoint, err := ioutil.TempDir(containersPath, source.Name())
if err != nil {
@@ -1596,71 +1560,68 @@ func (s *storageBtrfs) doContainerBackupCreateOptimized(backup backup, source co
if err != nil {
return err
}
- // /var/lib/lxd/storage-pools/<pool>/containers/<container-tmp>/.backup
- targetVolume := fmt.Sprintf("%s/.backup", tmpContainerMntPoint)
+ targetVolume := fmt.Sprintf("%s/.backup", tmpContainerMntPoint)
err = s.btrfsPoolVolumesSnapshot(sourceVolume, targetVolume, true, true)
if err != nil {
- logger.Errorf("Failed to create read-only btrfs snapshot \"%s\" of \"%s\": %s", targetVolume, sourceVolume, err)
return err
}
defer btrfsSubVolumesDelete(targetVolume)
- // /var/lib/lxd/storage-pools/<pool>/backups/<container>/container/<container>
- fsDump := fmt.Sprintf("%s/container.bin", baseMntPoint)
+ // Dump the container to a file
+ fsDump := fmt.Sprintf("%s/container.bin", tmpPath)
err = s.doBtrfsBackup(targetVolume, finalParent, fsDump)
if err != nil {
- logger.Errorf("Failed to btrfs send container \"%s\" -p \"%s\" to \"%s\": %s", targetVolume, finalParent, fsDump, err)
return err
}
return nil
}
-func (s *storageBtrfs) doContainerBackupCreateVanilla(backup backup, source container) error {
- // Create the path for the backup.
- baseMntPoint := getBackupMountPoint(s.pool.Name, backup.Name())
- targetBackupContainerMntPoint := fmt.Sprintf("%s/container", baseMntPoint)
- err := os.MkdirAll(targetBackupContainerMntPoint, 0711)
- if err != nil {
- return err
- }
-
- // retrieve snapshots
- snapshots, err := source.Snapshots()
- if err != nil {
- return err
- }
-
+func (s *storageBtrfs) doContainerBackupCreateVanilla(tmpPath string, backup backup, source container) error {
+ // Prepare for rsync
rsync := func(oldPath string, newPath string, bwlimit string) error {
output, err := rsyncLocalCopy(oldPath, newPath, bwlimit)
if err != nil {
- s.ContainerBackupDelete(backup.Name())
- return fmt.Errorf("failed to rsync: %s: %s", string(output), err)
+ return fmt.Errorf("Failed to rsync: %s: %s", string(output), err)
}
+
return nil
}
bwlimit := s.pool.Config["rsync.bwlimit"]
- if !backup.containerOnly && len(snapshots) > 0 {
- // /var/lib/lxd/storage-pools/<pool>/backups/<container>/snapshots
- targetBackupSnapshotsMntPoint := fmt.Sprintf("%s/snapshots", baseMntPoint)
- err = os.MkdirAll(targetBackupSnapshotsMntPoint, 0711)
+
+ // Handle snapshots
+ if !backup.containerOnly {
+ snapshotsPath := fmt.Sprintf("%s/snapshots", tmpPath)
+
+ // Retrieve the snapshots
+ snapshots, err := source.Snapshots()
if err != nil {
- logger.Errorf("Failed to create directory \"%s\": %s", targetBackupSnapshotsMntPoint, err)
return err
}
- logger.Debugf("Created directory \"%s\"", targetBackupSnapshotsMntPoint)
+
+ // Create the snapshot path
+ if len(snapshots) > 0 {
+ err = os.MkdirAll(snapshotsPath, 0711)
+ if err != nil {
+ return err
+ }
+ }
for _, snap := range snapshots {
+ _, snapName, _ := containerGetParentAndSnapshotName(snap.Name())
+
+ // Mount the snapshot to a usable path
_, err := s.ContainerSnapshotStart(snap)
if err != nil {
return err
}
snapshotMntPoint := getSnapshotMountPoint(s.pool.Name, snap.Name())
- _, snapName, _ := containerGetParentAndSnapshotName(snap.Name())
- target := fmt.Sprintf("%s/%s", targetBackupSnapshotsMntPoint, snapName)
+ target := fmt.Sprintf("%s/%s", snapshotsPath, snapName)
+
+ // Copy the snapshot
err = rsync(snapshotMntPoint, target, bwlimit)
s.ContainerSnapshotStop(snap)
if err != nil {
@@ -1669,9 +1630,8 @@ func (s *storageBtrfs) doContainerBackupCreateVanilla(backup backup, source cont
}
}
- // /var/lib/lxd/storage-pools/<pool>/containers/<container>
+ // Make a temporary copy of the container
sourceVolume := getContainerMountPoint(s.pool.Name, source.Name())
-
containersPath := getContainerMountPoint(s.pool.Name, "")
tmpContainerMntPoint, err := ioutil.TempDir(containersPath, source.Name())
if err != nil {
@@ -1683,16 +1643,17 @@ func (s *storageBtrfs) doContainerBackupCreateVanilla(backup backup, source cont
if err != nil {
return err
}
- // /var/lib/lxd/storage-pools/<pool>/containers/<container-tmp>/.backup
+
targetVolume := fmt.Sprintf("%s/.backup", tmpContainerMntPoint)
err = s.btrfsPoolVolumesSnapshot(sourceVolume, targetVolume, true, true)
if err != nil {
- logger.Errorf("Failed to create read-only btrfs snapshot \"%s\" of \"%s\": %s", targetVolume, sourceVolume, err)
return err
}
defer btrfsSubVolumesDelete(targetVolume)
- err = rsync(targetVolume, targetBackupContainerMntPoint, bwlimit)
+ // Copy the container
+ containerPath := fmt.Sprintf("%s/container", tmpPath)
+ err = rsync(targetVolume, containerPath, bwlimit)
if err != nil {
return err
}
@@ -1701,9 +1662,7 @@ func (s *storageBtrfs) doContainerBackupCreateVanilla(backup backup, source cont
}
func (s *storageBtrfs) ContainerBackupCreate(backup backup, source container) error {
- logger.Debugf("Creating BTRFS storage volume for backup \"%s\" on storage pool \"%s\"", backup.Name(), s.pool.Name)
-
- // start storage
+ // Start storage
ourStart, err := source.StorageStart()
if err != nil {
return err
@@ -1712,74 +1671,33 @@ func (s *storageBtrfs) ContainerBackupCreate(backup backup, source container) er
defer source.StorageStop()
}
- if backup.optimizedStorage {
- logger.Debugf("Created BTRFS storage volume for backup \"%s\" on storage pool \"%s\"", backup.Name(), s.pool.Name)
- return s.doContainerBackupCreateOptimized(backup, source)
- }
-
- return s.doContainerBackupCreateVanilla(backup, source)
-}
-
-func (s *storageBtrfs) ContainerBackupDelete(name string) error {
- logger.Debugf("Deleting BTRFS storage volume for backup \"%s\" on storage pool \"%s\"", name, s.pool.Name)
- backupContainerMntPoint := getBackupMountPoint(s.pool.Name, name)
- if shared.PathExists(backupContainerMntPoint) {
- err := os.RemoveAll(backupContainerMntPoint)
- if err != nil {
- return err
- }
+ // Create a temporary path for the backup
+ tmpPath, err := ioutil.TempDir(shared.VarPath("backups"), "lxd_backup_")
+ if err != nil {
+ return err
}
+ defer os.RemoveAll(tmpPath)
- sourceContainerName, _, _ := containerGetParentAndSnapshotName(name)
- backupContainerPath := getBackupMountPoint(s.pool.Name, sourceContainerName)
- empty, _ := shared.PathIsEmpty(backupContainerPath)
- if empty == true {
- err := os.Remove(backupContainerPath)
+ // Generate the actual backup
+ if backup.optimizedStorage {
+ err = s.doContainerBackupCreateOptimized(tmpPath, backup, source)
if err != nil {
return err
}
- }
-
- logger.Debugf("Deleted BTRFS storage volume for backup \"%s\" on storage pool \"%s\"", name, s.pool.Name)
- return nil
-}
-
-func (s *storageBtrfs) ContainerBackupRename(backup backup, newName string) error {
- logger.Debugf("Renaming BTRFS storage volume for backup \"%s\" from %s to %s", backup.Name(), backup.Name(), newName)
- oldBackupMntPoint := getBackupMountPoint(s.pool.Name, backup.Name())
- newBackupMntPoint := getBackupMountPoint(s.pool.Name, newName)
-
- // Rename directory
- if shared.PathExists(oldBackupMntPoint) {
- err := os.Rename(oldBackupMntPoint, newBackupMntPoint)
+ } else {
+ err := s.doContainerBackupCreateVanilla(tmpPath, backup, source)
if err != nil {
return err
}
}
- logger.Debugf("Renamed BTRFS storage volume for backup \"%s\" from %s to %s", backup.Name(), backup.Name(), newName)
- return nil
-}
-
-func (s *storageBtrfs) ContainerBackupDump(backup backup) ([]byte, error) {
- backupMntPoint := getBackupMountPoint(s.pool.Name, backup.Name())
- logger.Debugf("Taring up \"%s\" on storage pool \"%s\"", backupMntPoint, s.pool.Name)
-
- args := []string{"-cJf", "-", "--xattrs", "-C", backupMntPoint, "--transform", "s,^./,backup/,"}
- if backup.ContainerOnly() {
- // Exclude snapshots directory
- args = append(args, "--exclude", fmt.Sprintf("%s/snapshots", backup.Name()))
- }
- args = append(args, ".")
-
- var buffer bytes.Buffer
- err := shared.RunCommandWithFds(nil, &buffer, "tar", args...)
+ // Pack the backup
+ err = backupCreateTarball(tmpPath, backup)
if err != nil {
- return nil, err
+ return err
}
- logger.Debugf("Tared up \"%s\" on storage pool \"%s\"", backupMntPoint, s.pool.Name)
- return buffer.Bytes(), nil
+ return nil
}
func (s *storageBtrfs) doContainerBackupLoadOptimized(info backupInfo, data io.ReadSeeker) error {
diff --git a/lxd/storage_ceph.go b/lxd/storage_ceph.go
index da4708e008..2dd3ccd0af 100644
--- a/lxd/storage_ceph.go
+++ b/lxd/storage_ceph.go
@@ -1,9 +1,9 @@
package main
import (
- "bytes"
"fmt"
"io"
+ "io/ioutil"
"os"
"strings"
"syscall"
@@ -988,17 +988,6 @@ func (s *storageCeph) ContainerDelete(container container) error {
containerName, s.pool.Name, err)
return err
}
- logger.Debugf(`Deleted mountpoint %s for RBD storage volume of container "%s" for RBD storage volume on storage pool "%s"`, containerMntPoint, containerName, s.pool.Name)
-
- backups, err := container.Backups()
- if err != nil {
- return err
- }
-
- for _, backup := range backups {
- backupName := strings.Split(backup.Name(), "/")[1]
- s.ContainerBackupDelete(backupName)
- }
logger.Debugf(`Deleted RBD storage volume for container "%s" on storage pool "%s"`, containerName, s.pool.Name)
return nil
@@ -1559,17 +1548,6 @@ func (s *storageCeph) ContainerRename(c container, newName string) error {
}
}
- backups, err := c.Backups()
- if err != nil {
- return err
- }
-
- for _, backup := range backups {
- backupName := strings.Split(backup.Name(), "/")[1]
- newName := fmt.Sprintf("%s/%s", newName, backupName)
- s.ContainerBackupRename(backup, newName)
- }
-
logger.Debugf(`Renamed RBD storage volume for container "%s" from "%s" to "%s"`, oldName, oldName, newName)
revert = false
@@ -1903,9 +1881,7 @@ func (s *storageCeph) ContainerSnapshotCreateEmpty(c container) error {
}
func (s *storageCeph) ContainerBackupCreate(backup backup, source container) error {
- logger.Debugf("Creating backup for container \"%s\" on storage pool \"%s\"", backup.Name(), s.pool.Name)
-
- // mount storage
+ // Start storage
ourStart, err := source.StorageStart()
if err != nil {
return err
@@ -1914,13 +1890,14 @@ func (s *storageCeph) ContainerBackupCreate(backup backup, source container) err
defer source.StorageStop()
}
- baseMntPoint := getBackupMountPoint(s.pool.Name, backup.Name())
- // create the path for the backup
- err = os.MkdirAll(baseMntPoint, 0711)
+ // Create a temporary path for the backup
+ tmpPath, err := ioutil.TempDir(shared.VarPath("backups"), "lxd_backup_")
if err != nil {
return err
}
+ defer os.RemoveAll(tmpPath)
+ // Generate the actual backup
if !backup.containerOnly {
snapshots, err := source.Snapshots()
if err != nil {
@@ -1928,82 +1905,25 @@ func (s *storageCeph) ContainerBackupCreate(backup backup, source container) err
}
for _, snap := range snapshots {
- err := s.cephRBDVolumeBackupCreate(backup, snap)
+ err := s.cephRBDVolumeBackupCreate(tmpPath, backup, snap)
if err != nil {
return err
}
}
}
- err = s.cephRBDVolumeBackupCreate(backup, source)
+ err = s.cephRBDVolumeBackupCreate(tmpPath, backup, source)
if err != nil {
return err
}
- logger.Debugf("Created backup for container \"%s\" on storage pool \"%s\"", backup.Name(), s.pool.Name)
- return nil
-}
-
-func (s *storageCeph) ContainerBackupDelete(name string) error {
- logger.Debugf("Deleting RBD storage volume for backup \"%s\" on storage pool \"%s\"", name, s.pool.Name)
- backupContainerMntPoint := getBackupMountPoint(s.pool.Name, name)
- if shared.PathExists(backupContainerMntPoint) {
- err := os.RemoveAll(backupContainerMntPoint)
- if err != nil {
- return err
- }
- }
-
- sourceContainerName, _, _ := containerGetParentAndSnapshotName(name)
- backupContainerPath := getBackupMountPoint(s.pool.Name, sourceContainerName)
- empty, _ := shared.PathIsEmpty(backupContainerPath)
- if empty == true {
- err := os.Remove(backupContainerPath)
- if err != nil {
- return err
- }
- }
-
- logger.Debugf("Deleted RBD storage volume for backup \"%s\" on storage pool \"%s\"", name, s.pool.Name)
- return nil
-}
-
-func (s *storageCeph) ContainerBackupRename(backup backup, newName string) error {
- logger.Debugf("Renaming RBD storage volume for backup \"%s\" from %s to %s", backup.Name(), backup.Name(), newName)
- oldBackupMntPoint := getBackupMountPoint(s.pool.Name, backup.Name())
- newBackupMntPoint := getBackupMountPoint(s.pool.Name, newName)
-
- // Rename directory
- if shared.PathExists(oldBackupMntPoint) {
- err := os.Rename(oldBackupMntPoint, newBackupMntPoint)
- if err != nil {
- return err
- }
- }
-
- logger.Debugf("Renamed RBD storage volume for backup \"%s\" from %s to %s", backup.Name(), backup.Name(), newName)
- return nil
-}
-
-func (s *storageCeph) ContainerBackupDump(backup backup) ([]byte, error) {
- backupMntPoint := getBackupMountPoint(s.pool.Name, backup.Name())
- logger.Debugf("Taring up \"%s\" on storage pool \"%s\"", backupMntPoint, s.pool.Name)
-
- args := []string{"-cJf", "-", "--xattrs", "-C", backupMntPoint, "--transform", "s,^./,backup/,"}
- if backup.ContainerOnly() {
- // Exclude snapshots directory
- args = append(args, "--exclude", fmt.Sprintf("%s/snapshots", backup.Name()))
- }
- args = append(args, ".")
-
- var buffer bytes.Buffer
- err := shared.RunCommandWithFds(nil, &buffer, "tar", args...)
+ // Pack the backup
+ err = backupCreateTarball(tmpPath, backup)
if err != nil {
- return nil, err
+ return err
}
- logger.Debugf("Tared up \"%s\" on storage pool \"%s\"", backupMntPoint, s.pool.Name)
- return buffer.Bytes(), nil
+ return nil
}
// This function recreates an rbd container including its snapshots. It
diff --git a/lxd/storage_ceph_utils.go b/lxd/storage_ceph_utils.go
index 8f2ef34caf..9a75f2b4e2 100644
--- a/lxd/storage_ceph_utils.go
+++ b/lxd/storage_ceph_utils.go
@@ -1597,11 +1597,24 @@ func (s *storageCeph) cephRBDVolumeDumpToFile(sourceVolumeName string, file stri
}
// cephRBDVolumeBackupCreate creates a backup of a container or snapshot.
-func (s *storageCeph) cephRBDVolumeBackupCreate(backup backup, source container) error {
+func (s *storageCeph) cephRBDVolumeBackupCreate(tmpPath string, backup backup, source container) error {
sourceIsSnapshot := source.IsSnapshot()
sourceContainerName := source.Name()
sourceContainerOnlyName := sourceContainerName
sourceSnapshotOnlyName := ""
+
+ // Prepare for rsync
+ rsync := func(oldPath string, newPath string, bwlimit string) error {
+ output, err := rsyncLocalCopy(oldPath, newPath, bwlimit)
+ if err != nil {
+ return fmt.Errorf("Failed to rsync: %s: %s", string(output), err)
+ }
+
+ return nil
+ }
+
+ bwlimit := s.pool.Config["rsync.bwlimit"]
+ // Create a temporary snapshot
snapshotName := fmt.Sprintf("zombie_snapshot_%s", uuid.NewRandom().String())
if sourceIsSnapshot {
sourceContainerOnlyName, sourceSnapshotOnlyName, _ = containerGetParentAndSnapshotName(sourceContainerName)
@@ -1621,13 +1634,14 @@ func (s *storageCeph) cephRBDVolumeBackupCreate(backup backup, source container)
defer cephRBDSnapshotDelete(s.ClusterName, s.OSDPoolName, sourceContainerName, storagePoolVolumeTypeNameContainer, snapshotName, s.UserName)
}
- // protect volume so we can create clones of it
+ // Protect volume so we can create clones of it
err := cephRBDSnapshotProtect(s.ClusterName, s.OSDPoolName, sourceContainerOnlyName, storagePoolVolumeTypeNameContainer, snapshotName, s.UserName)
if err != nil {
return err
}
defer cephRBDSnapshotUnprotect(s.ClusterName, s.OSDPoolName, sourceContainerOnlyName, storagePoolVolumeTypeNameContainer, snapshotName, s.UserName)
+ // Create a new volume from the snapshot
cloneName := uuid.NewRandom().String()
err = cephRBDCloneCreate(s.ClusterName, s.OSDPoolName, sourceContainerOnlyName, storagePoolVolumeTypeNameContainer, snapshotName, s.OSDPoolName, cloneName, "backup", s.UserName)
if err != nil {
@@ -1635,13 +1649,14 @@ func (s *storageCeph) cephRBDVolumeBackupCreate(backup backup, source container)
}
defer cephRBDVolumeDelete(s.ClusterName, s.OSDPoolName, cloneName, "backup", s.UserName)
+ // Map the new volume
RBDDevPath, err := cephRBDVolumeMap(s.ClusterName, s.OSDPoolName, cloneName, "backup", s.UserName)
if err != nil {
return err
}
defer cephRBDVolumeUnmap(s.ClusterName, s.OSDPoolName, cloneName, "backup", s.UserName, true)
- // Generate a new xfs's UUID
+ // Generate a new UUID if needed
RBDFilesystem := s.getRBDFilesystem()
msg, err := fsGenerateNewUUID(RBDFilesystem, RBDDevPath)
if err != nil {
@@ -1649,23 +1664,19 @@ func (s *storageCeph) cephRBDVolumeBackupCreate(backup backup, source container)
return err
}
- containersDir := getContainerMountPoint(s.pool.Name, "")
-
- targetName := sourceContainerName
- if sourceIsSnapshot {
- _, targetName, _ = containerGetParentAndSnapshotName(sourceContainerName)
- }
- tmpContainerMntPoint, err := ioutil.TempDir(containersDir, fmt.Sprintf("backup_%s_", targetName))
+ // Create a temporary mountpoint
+ tmpContainerMntPoint, err := ioutil.TempDir("", "lxd_backup_")
if err != nil {
return err
}
defer os.RemoveAll(tmpContainerMntPoint)
- err = os.Chmod(tmpContainerMntPoint, 0711)
+ err = os.Chmod(tmpContainerMntPoint, 0700)
if err != nil {
return err
}
+ // Mount the volume
mountFlags, mountOptions := lxdResolveMountoptions(s.getRBDMountOptions())
err = tryMount(RBDDevPath, tmpContainerMntPoint, RBDFilesystem, mountFlags, mountOptions)
if err != nil {
@@ -1675,11 +1686,16 @@ func (s *storageCeph) cephRBDVolumeBackupCreate(backup backup, source container)
logger.Debugf("Mounted RBD device %s onto %s", RBDDevPath, tmpContainerMntPoint)
defer tryUnmount(tmpContainerMntPoint, syscall.MNT_DETACH)
+ // Figure out the target name
+ targetName := sourceContainerName
+ if sourceIsSnapshot {
+ _, targetName, _ = containerGetParentAndSnapshotName(sourceContainerName)
+ }
+
// Create the path for the backup.
- baseMntPoint := getBackupMountPoint(s.pool.Name, backup.Name())
- targetBackupMntPoint := fmt.Sprintf("%s/container", baseMntPoint)
+ targetBackupMntPoint := fmt.Sprintf("%s/container", tmpPath)
if sourceIsSnapshot {
- targetBackupMntPoint = fmt.Sprintf("%s/snapshots/%s", baseMntPoint, targetName)
+ targetBackupMntPoint = fmt.Sprintf("%s/snapshots/%s", tmpPath, targetName)
}
err = os.MkdirAll(targetBackupMntPoint, 0711)
@@ -1687,16 +1703,6 @@ func (s *storageCeph) cephRBDVolumeBackupCreate(backup backup, source container)
return err
}
- rsync := func(oldPath string, newPath string, bwlimit string) error {
- output, err := rsyncLocalCopy(oldPath, newPath, bwlimit)
- if err != nil {
- s.ContainerBackupDelete(backup.Name())
- return fmt.Errorf("Failed to rsync: %s: %s", string(output), err)
- }
- return nil
- }
-
- bwlimit := s.pool.Config["rsync.bwlimit"]
err = rsync(tmpContainerMntPoint, targetBackupMntPoint, bwlimit)
if err != nil {
return err
diff --git a/lxd/storage_dir.go b/lxd/storage_dir.go
index 19f6bbf6ad..ba63a7597d 100644
--- a/lxd/storage_dir.go
+++ b/lxd/storage_dir.go
@@ -1,9 +1,9 @@
package main
import (
- "bytes"
"fmt"
"io"
+ "io/ioutil"
"os"
"path/filepath"
"strings"
@@ -589,16 +589,6 @@ func (s *storageDir) ContainerDelete(container container) error {
}
}
- backups, err := container.Backups()
- if err != nil {
- return err
- }
-
- for _, backup := range backups {
- backupName := strings.Split(backup.Name(), "/")[1]
- s.ContainerBackupDelete(backupName)
- }
-
logger.Debugf("Deleted DIR storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
return nil
}
@@ -805,17 +795,6 @@ func (s *storageDir) ContainerRename(container container, newName string) error
}
}
- backups, err := container.Backups()
- if err != nil {
- return err
- }
-
- for _, backup := range backups {
- backupName := strings.Split(backup.Name(), "/")[1]
- newName := fmt.Sprintf("%s/%s", newName, backupName)
- s.ContainerBackupRename(backup, newName)
- }
-
logger.Debugf("Renamed DIR storage volume for container \"%s\" from %s to %s", s.volume.Name, s.volume.Name, newName)
return nil
}
@@ -1054,90 +1033,59 @@ func (s *storageDir) ContainerSnapshotStop(container container) (bool, error) {
return true, nil
}
-func (s *storageDir) ContainerBackupCreate(backup backup, sourceContainer container) error {
- logger.Debugf("Creating DIR storage volume for backup \"%s\" on storage pool \"%s\"",
- backup.Name(), s.pool.Name)
-
- _, err := s.StoragePoolMount()
+func (s *storageDir) ContainerBackupCreate(backup backup, source container) error {
+ // Start storage
+ ourStart, err := source.StorageStart()
if err != nil {
return err
}
+ if ourStart {
+ defer source.StorageStop()
+ }
- // Create the path for the backup.
- baseMntPoint := getBackupMountPoint(s.pool.Name, backup.Name())
- targetBackupContainerMntPoint := fmt.Sprintf("%s/container",
- baseMntPoint)
- targetBackupSnapshotsMntPoint := fmt.Sprintf("%s/snapshots",
- baseMntPoint)
-
- err = os.MkdirAll(targetBackupContainerMntPoint, 0711)
+ // Create a temporary path for the backup
+ tmpPath, err := ioutil.TempDir(shared.VarPath("backups"), "lxd_backup_")
if err != nil {
return err
}
+ defer os.RemoveAll(tmpPath)
- if !backup.ContainerOnly() {
- // Create path for snapshots as well.
- err = os.MkdirAll(targetBackupSnapshotsMntPoint, 0711)
- if err != nil {
- return err
- }
- }
-
+ // Prepare for rsync
rsync := func(oldPath string, newPath string, bwlimit string) error {
output, err := rsyncLocalCopy(oldPath, newPath, bwlimit)
if err != nil {
- s.ContainerBackupDelete(backup.Name())
- return fmt.Errorf("failed to rsync: %s: %s", string(output), err)
+ return fmt.Errorf("Failed to rsync: %s: %s", string(output), err)
}
- return nil
- }
- ourStart, err := sourceContainer.StorageStart()
- if err != nil {
- return err
- }
- if ourStart {
- defer sourceContainer.StorageStop()
+ return nil
}
- _, sourcePool, _ := sourceContainer.Storage().GetContainerPoolInfo()
- sourceContainerMntPoint := getContainerMountPoint(sourcePool,
- sourceContainer.Name())
bwlimit := s.pool.Config["rsync.bwlimit"]
- err = rsync(sourceContainerMntPoint, targetBackupContainerMntPoint, bwlimit)
- if err != nil {
- return err
- }
- if sourceContainer.IsRunning() {
- // This is done to ensure consistency when snapshotting. But we
- // probably shouldn't fail just because of that.
- logger.Debugf("Trying to freeze and rsync again to ensure consistency")
+ // Handle snapshots
+ if !backup.containerOnly {
+ snapshotsPath := fmt.Sprintf("%s/snapshots", tmpPath)
- err := sourceContainer.Freeze()
- if err != nil {
- logger.Errorf("Trying to freeze and rsync again failed")
- }
- defer sourceContainer.Unfreeze()
-
- err = rsync(sourceContainerMntPoint, targetBackupContainerMntPoint, bwlimit)
+ // Retrieve the snapshots
+ snapshots, err := source.Snapshots()
if err != nil {
return err
}
- }
- if !backup.ContainerOnly() {
- // Backup snapshots as well.
- snaps, err := sourceContainer.Snapshots()
- if err != nil {
- return nil
+ // Create the snapshot path
+ if len(snapshots) > 0 {
+ err = os.MkdirAll(snapshotsPath, 0711)
+ if err != nil {
+ return err
+ }
}
- for _, ct := range snaps {
- snapshotMntPoint := getSnapshotMountPoint(sourcePool, ct.Name())
- _, snapName, _ := containerGetParentAndSnapshotName(ct.Name())
- target := fmt.Sprintf("%s/%s", targetBackupSnapshotsMntPoint, snapName)
+ for _, snap := range snapshots {
+ _, snapName, _ := containerGetParentAndSnapshotName(snap.Name())
+ snapshotMntPoint := getSnapshotMountPoint(s.pool.Name, snap.Name())
+ target := fmt.Sprintf("%s/%s", snapshotsPath, snapName)
+ // Copy the snapshot
err = rsync(snapshotMntPoint, target, bwlimit)
if err != nil {
return err
@@ -1145,114 +1093,32 @@ func (s *storageDir) ContainerBackupCreate(backup backup, sourceContainer contai
}
}
- logger.Debugf("Created DIR storage volume for backup \"%s\" on storage pool \"%s\"",
- backup.Name(), s.pool.Name)
- return nil
-}
-
-func (s *storageDir) ContainerBackupDelete(name string) error {
- logger.Debugf("Deleting DIR storage volume for backup \"%s\" on storage pool \"%s\"",
- name, s.pool.Name)
-
- _, err := s.StoragePoolMount()
- if err != nil {
- return err
- }
-
- source := s.pool.Config["source"]
- if source == "" {
- return fmt.Errorf("no \"source\" property found for the storage pool")
- }
-
- err = dirBackupDeleteInternal(s.pool.Name, name)
- if err != nil {
- return err
- }
-
- logger.Debugf("Deleted DIR storage volume for backup \"%s\" on storage pool \"%s\"",
- name, s.pool.Name)
- return nil
-}
-
-func dirBackupDeleteInternal(poolName string, backupName string) error {
- backupContainerMntPoint := getBackupMountPoint(poolName, backupName)
- if shared.PathExists(backupContainerMntPoint) {
- err := os.RemoveAll(backupContainerMntPoint)
- if err != nil {
- return err
- }
- }
+ if source.IsRunning() {
+ // This is done to ensure consistency when snapshotting. But we
+ // probably shouldn't fail just because of that.
+ logger.Debugf("Freezing container '%s' for backup", source.Name())
- sourceContainerName, _, _ := containerGetParentAndSnapshotName(backupName)
- backupContainerPath := getBackupMountPoint(poolName, sourceContainerName)
- empty, _ := shared.PathIsEmpty(backupContainerPath)
- if empty == true {
- err := os.Remove(backupContainerPath)
+ err := source.Freeze()
if err != nil {
- return err
+ logger.Errorf("Failed to freeze container '%s' for backup: %v", source.Name(), err)
}
+ defer source.Unfreeze()
}
- return nil
-}
-
-func (s *storageDir) ContainerBackupRename(backup backup, newName string) error {
- logger.Debugf("Renaming DIR storage volume for backup \"%s\" from %s to %s",
- backup.Name(), backup.Name(), newName)
-
- _, err := s.StoragePoolMount()
+ // Copy the container
+ containerPath := fmt.Sprintf("%s/container", tmpPath)
+ err = rsync(source.Path(), containerPath, bwlimit)
if err != nil {
return err
}
- source := s.pool.Config["source"]
- if source == "" {
- return fmt.Errorf("no \"source\" property found for the storage pool")
- }
-
- oldBackupMntPoint := getBackupMountPoint(s.pool.Name, backup.Name())
- newBackupMntPoint := getBackupMountPoint(s.pool.Name, newName)
-
- // Rename directory
- if shared.PathExists(oldBackupMntPoint) {
- err := os.Rename(oldBackupMntPoint, newBackupMntPoint)
- if err != nil {
- return err
- }
- }
-
- logger.Debugf("Renamed DIR storage volume for backup \"%s\" from %s to %s",
- backup.Name(), backup.Name(), newName)
- return nil
-}
-
-func (s *storageDir) ContainerBackupDump(backup backup) ([]byte, error) {
- _, err := s.StoragePoolMount()
+ // Pack the backup
+ err = backupCreateTarball(tmpPath, backup)
if err != nil {
- return nil, err
- }
-
- source := s.pool.Config["source"]
- if source == "" {
- return nil, fmt.Errorf("no \"source\" property found for the storage pool")
- }
-
- backupMntPoint := getBackupMountPoint(s.pool.Name, backup.Name())
-
- args := []string{"-cJf", "-", "--xattrs", "-C", backupMntPoint, "--transform", "s,^./,backup/,"}
- if backup.ContainerOnly() {
- // Exclude snapshots directory
- args = append(args, "--exclude", fmt.Sprintf("%s/snapshots", backup.Name()))
- }
- args = append(args, ".")
-
- var buffer bytes.Buffer
- err = shared.RunCommandWithFds(nil, &buffer, "tar", args...)
- if err != nil {
- return nil, err
+ return err
}
- return buffer.Bytes(), nil
+ return nil
}
func (s *storageDir) ContainerBackupLoad(info backupInfo, data io.ReadSeeker) error {
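
The per-backend ContainerBackupDump() implementations are gone; every backend now stages its files and hands the result to backupCreateTarball() to produce the actual tarball. That helper is introduced elsewhere in this series, so the snippet below is only a rough sketch of the idea, reusing the tar invocation from the removed ContainerBackupDump code; the function name, signature and explicit output path are assumptions made for illustration:

// Sketch only: pack a staged backup directory into an xz-compressed tarball,
// prefixing entries with "backup/" as the removed ContainerBackupDump did.
// Assumes the lxd package context for the shared import.
func packBackupSketch(stagingPath string, tarballPath string) error {
	target, err := os.Create(tarballPath)
	if err != nil {
		return err
	}
	defer target.Close()

	args := []string{"-cJf", "-", "--xattrs", "-C", stagingPath,
		"--transform", "s,^./,backup/,", "."}

	// Stream tar's stdout straight into the target file.
	return shared.RunCommandWithFds(nil, target, "tar", args...)
}
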
diff --git a/lxd/storage_lvm.go b/lxd/storage_lvm.go
index 5e3f06ccfb..036761210e 100644
--- a/lxd/storage_lvm.go
+++ b/lxd/storage_lvm.go
@@ -1,9 +1,9 @@
package main
import (
- "bytes"
"fmt"
"io"
+ "io/ioutil"
"os"
"path/filepath"
"strconv"
@@ -1068,68 +1068,6 @@ func (s *storageLvm) ContainerDelete(container container) error {
return err
}
- if container.IsSnapshot() {
- // Snapshots will return a empty list when calling Backups(). We need to
- // find the correct backup by iterating over the container's backups.
- ctName, snapshotName, _ := containerGetParentAndSnapshotName(container.Name())
- ct, err := containerLoadByName(s.s, ctName)
- if err != nil {
- return err
- }
-
- backups, err := ct.Backups()
- if err != nil {
- return err
- }
-
- for _, backup := range backups {
- if backup.ContainerOnly() {
- // Skip container-only backups since they don't include
- // snapshots
- continue
- }
-
- parts := strings.Split(backup.Name(), "/")
- err := s.ContainerBackupDelete(fmt.Sprintf("%s/%s/%s", ctName,
- snapshotName, parts[1]))
- if err != nil {
- return err
- }
- }
- } else {
- backups, err := container.Backups()
- if err != nil {
- return err
- }
-
- for _, backup := range backups {
- err := s.ContainerBackupDelete(backup.Name())
- if err != nil {
- return err
- }
-
- if backup.ContainerOnly() {
- continue
- }
-
- // Remove the snapshots
- snapshots, err := container.Snapshots()
- if err != nil {
- return err
- }
-
- for _, snap := range snapshots {
- ctName, snapshotName, _ := containerGetParentAndSnapshotName(snap.Name())
- parts := strings.Split(backup.Name(), "/")
- err := s.ContainerBackupDelete(fmt.Sprintf("%s/%s/%s", ctName,
- snapshotName, parts[1]))
- if err != nil {
- return err
- }
- }
- }
- }
-
logger.Debugf("Deleted LVM storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
return nil
}
@@ -1393,29 +1331,6 @@ func (s *storageLvm) ContainerRename(container container, newContainerName strin
}
}
- // Rename backups
- if !container.IsSnapshot() {
- oldBackupPath := getBackupMountPoint(s.pool.Name, oldName)
- newBackupPath := getBackupMountPoint(s.pool.Name, newContainerName)
- if shared.PathExists(oldBackupPath) {
- err = os.Rename(oldBackupPath, newBackupPath)
- if err != nil {
- return err
- }
- }
- }
-
- backups, err := container.Backups()
- if err != nil {
- return err
- }
-
- for _, backup := range backups {
- backupName := strings.Split(backup.Name(), "/")[1]
- newName := fmt.Sprintf("%s/%s", newContainerName, backupName)
- s.ContainerBackupRename(backup, newName)
- }
-
tryUndo = false
logger.Debugf("Renamed LVM storage volume for container \"%s\" from %s to %s", s.volume.Name, s.volume.Name, newContainerName)
@@ -1664,58 +1579,65 @@ func (s *storageLvm) ContainerSnapshotCreateEmpty(snapshotContainer container) e
return nil
}
-func (s *storageLvm) ContainerBackupCreate(backup backup, sourceContainer container) error {
- logger.Debugf("Creating LVM storage volume for backup \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
- // mount storage
- ourStart, err := sourceContainer.StorageStart()
+func (s *storageLvm) ContainerBackupCreate(backup backup, source container) error {
+ // Start storage
+ ourStart, err := source.StorageStart()
if err != nil {
return err
}
if ourStart {
- defer sourceContainer.StorageStop()
- }
-
- // Create the path for the backup.
- baseMntPoint := getBackupMountPoint(s.pool.Name, backup.Name())
- targetBackupContainerMntPoint := fmt.Sprintf("%s/container", baseMntPoint)
- err = os.MkdirAll(targetBackupContainerMntPoint, 0711)
- if err != nil {
- return err
+ defer source.StorageStop()
}
- snapshots, err := sourceContainer.Snapshots()
+ // Create a temporary path for the backup
+ tmpPath, err := ioutil.TempDir(shared.VarPath("backups"), "lxd_backup_")
if err != nil {
return err
}
+ defer os.RemoveAll(tmpPath)
+ // Prepare for rsync
rsync := func(oldPath string, newPath string, bwlimit string) error {
output, err := rsyncLocalCopy(oldPath, newPath, bwlimit)
if err != nil {
- s.ContainerBackupDelete(backup.Name())
- return fmt.Errorf("failed to rsync: %s: %s", string(output), err)
+ return fmt.Errorf("Failed to rsync: %s: %s", string(output), err)
}
+
return nil
}
bwlimit := s.pool.Config["rsync.bwlimit"]
- if !backup.containerOnly && len(snapshots) > 0 {
- // /var/lib/lxd/storage-pools/<pool>/backups/<container>/snapshots
- targetBackupSnapshotsMntPoint := fmt.Sprintf("%s/snapshots", baseMntPoint)
- err = os.MkdirAll(targetBackupSnapshotsMntPoint, 0711)
+
+ // Handle snapshots
+ if !backup.containerOnly {
+ snapshotsPath := fmt.Sprintf("%s/snapshots", tmpPath)
+
+ // Retrieve the snapshots
+ snapshots, err := source.Snapshots()
if err != nil {
return err
}
+ // Create the snapshot path
+ if len(snapshots) > 0 {
+ err = os.MkdirAll(snapshotsPath, 0711)
+ if err != nil {
+ return err
+ }
+ }
+
for _, snap := range snapshots {
+ _, snapName, _ := containerGetParentAndSnapshotName(snap.Name())
+ snapshotMntPoint := getSnapshotMountPoint(s.pool.Name, snap.Name())
+ target := fmt.Sprintf("%s/%s", snapshotsPath, snapName)
+
+ // Mount the snapshot
_, err := s.ContainerSnapshotStart(snap)
if err != nil {
return err
}
- snapshotMntPoint := getSnapshotMountPoint(s.pool.Name, snap.Name())
- _, snapName, _ := containerGetParentAndSnapshotName(snap.Name())
- target := fmt.Sprintf("%s/%s", targetBackupSnapshotsMntPoint, snapName)
+ // Copy the snapshot
err = rsync(snapshotMntPoint, target, bwlimit)
s.ContainerSnapshotStop(snap)
if err != nil {
@@ -1724,18 +1646,16 @@ func (s *storageLvm) ContainerBackupCreate(backup backup, sourceContainer contai
}
}
- snapshotSuffix := uuid.NewRandom().String()
- sourceLvmDatasetSnapshot := fmt.Sprintf("snapshot-%s", snapshotSuffix)
-
- // /var/lib/lxd/storage-pools/<pool>/containers/<container>
+ // Make a temporary snapshot of the container
+ sourceLvmDatasetSnapshot := fmt.Sprintf("snapshot-%s", uuid.NewRandom().String())
tmpContainerMntPoint := getContainerMountPoint(s.pool.Name, sourceLvmDatasetSnapshot)
- err = os.MkdirAll(tmpContainerMntPoint, 0711)
+ err = os.MkdirAll(tmpContainerMntPoint, 0700)
if err != nil {
return err
}
defer os.RemoveAll(tmpContainerMntPoint)
- _, err = s.createSnapshotLV(s.pool.Name, sourceContainer.Name(),
+ _, err = s.createSnapshotLV(s.pool.Name, source.Name(),
storagePoolVolumeAPIEndpointContainers, containerNameToLVName(sourceLvmDatasetSnapshot),
storagePoolVolumeAPIEndpointContainers, false, s.useThinpool)
if err != nil {
@@ -1744,116 +1664,29 @@ func (s *storageLvm) ContainerBackupCreate(backup backup, sourceContainer contai
defer removeLV(s.pool.Name, storagePoolVolumeAPIEndpointContainers,
containerNameToLVName(sourceLvmDatasetSnapshot))
+ // Mount the temporary snapshot
_, err = s.doContainerMount(sourceLvmDatasetSnapshot)
if err != nil {
return err
}
defer s.ContainerUmount(sourceLvmDatasetSnapshot, "")
- err = rsync(tmpContainerMntPoint, targetBackupContainerMntPoint, bwlimit)
+ // Copy the container
+ containerPath := fmt.Sprintf("%s/container", tmpPath)
+ err = rsync(tmpContainerMntPoint, containerPath, bwlimit)
if err != nil {
return err
}
- logger.Debugf("Created LVM storage volume for backup \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
- return nil
-}
-
-func (s *storageLvm) ContainerBackupDelete(name string) error {
- logger.Debugf("Deleting LVM storage volume for backup \"%s\" on storage pool \"%s\"",
- name, s.pool.Name)
-
- _, err := s.StoragePoolMount()
+ // Pack the backup
+ err = backupCreateTarball(tmpPath, backup)
if err != nil {
return err
}
- source := s.pool.Config["source"]
- if source == "" {
- return fmt.Errorf("no \"source\" property found for the storage pool")
- }
-
- err = lvmBackupDeleteInternal(s.pool.Name, name)
- if err != nil {
- return err
- }
-
- logger.Debugf("Deleted LVM storage volume for backup \"%s\" on storage pool \"%s\"",
- name, s.pool.Name)
return nil
}
-func lvmBackupDeleteInternal(poolName string, backupName string) error {
- backupContainerMntPoint := getBackupMountPoint(poolName, backupName)
- if shared.PathExists(backupContainerMntPoint) {
- err := os.RemoveAll(backupContainerMntPoint)
- if err != nil {
- return err
- }
- }
-
- sourceContainerName, _, _ := containerGetParentAndSnapshotName(backupName)
- backupContainerPath := getBackupMountPoint(poolName, sourceContainerName)
- empty, _ := shared.PathIsEmpty(backupContainerPath)
- if empty == true {
- err := os.Remove(backupContainerPath)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-func (s *storageLvm) ContainerBackupRename(backup backup, newName string) error {
- logger.Debugf("Renaming LVM storage volume for backup \"%s\" from %s to %s",
- backup.Name(), backup.Name(), newName)
-
- _, err := s.StoragePoolMount()
- if err != nil {
- return err
- }
-
- source := s.pool.Config["source"]
- if source == "" {
- return fmt.Errorf("no \"source\" property found for the storage pool")
- }
-
- oldBackupMntPoint := getBackupMountPoint(s.pool.Name, backup.Name())
- newBackupMntPoint := getBackupMountPoint(s.pool.Name, newName)
-
- // Rename directory
- if shared.PathExists(oldBackupMntPoint) {
- err := os.Rename(oldBackupMntPoint, newBackupMntPoint)
- if err != nil {
- return err
- }
- }
-
- logger.Debugf("Renamed LVM storage volume for backup \"%s\" from %s to %s",
- backup.Name(), backup.Name(), newName)
- return nil
-}
-
-func (s *storageLvm) ContainerBackupDump(backup backup) ([]byte, error) {
- var buffer bytes.Buffer
-
- args := []string{"-cJf", "-", "--xattrs", "-C", getBackupMountPoint(s.pool.Name, backup.Name()),
- "--transform", "s,^./,backup/,"}
- if backup.ContainerOnly() {
- // Exclude snapshots directory
- args = append(args, "--exclude", fmt.Sprintf("%s/snapshots", backup.Name()))
- }
- args = append(args, ".")
-
- // Create tarball
- err := shared.RunCommandWithFds(nil, &buffer, "tar", args...)
- if err != nil {
- return nil, err
- }
-
- return buffer.Bytes(), nil
-}
-
func (s *storageLvm) ContainerBackupLoad(info backupInfo, data io.ReadSeeker) error {
containerPath, err := s.doContainerBackupLoad(info.Name, info.Privileged, false)
if err != nil {
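
The LVM ContainerBackupCreate() above and the ZFS one further down follow the same skeleton as the dir backend: stage into a throwaway directory created under the new backups directory, let backend-specific code fill it with container/ and snapshots/, then pack it. A minimal sketch of that shared shape, where doCopy stands in for the backend-specific copy step; the wrapper itself is illustrative, only the TempDir/RemoveAll/backupCreateTarball calls are taken from the patch:

// Sketch of the common backup shape; doCopy stands in for the
// backend-specific step that populates the staging directory.
func containerBackupCreateSketch(b backup, doCopy func(stagingPath string) error) error {
	// Stage under the new backups directory; clean up unconditionally.
	tmpPath, err := ioutil.TempDir(shared.VarPath("backups"), "lxd_backup_")
	if err != nil {
		return err
	}
	defer os.RemoveAll(tmpPath)

	// Backend-specific part: fill tmpPath with container/ and snapshots/.
	err = doCopy(tmpPath)
	if err != nil {
		return err
	}

	// Pack the staged tree, as every backend now does.
	return backupCreateTarball(tmpPath, b)
}
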
diff --git a/lxd/storage_lvm_utils.go b/lxd/storage_lvm_utils.go
index 825fb5eab6..f685279521 100644
--- a/lxd/storage_lvm_utils.go
+++ b/lxd/storage_lvm_utils.go
@@ -285,38 +285,6 @@ func (s *storageLvm) createSnapshotContainer(snapshotContainer container, source
return nil
}
-func (s *storageLvm) createContainerBackup(backup backup, sourceContainer container,
- readonly bool) error {
- tryUndo := true
-
- sourceContainerName := sourceContainer.Name()
- sourceContainerLvmName := containerNameToLVName(sourceContainerName)
-
- // Always add the backup name as suffix, e.g. container-backup0 or
- // container-snap1-backup1.
- names := strings.Split(backup.Name(), "/")
- targetContainerLvmName := fmt.Sprintf("%s-%s", sourceContainerLvmName,
- names[len(names)-1])
-
- poolName := s.getOnDiskPoolName()
- _, err := s.createSnapshotLV(poolName, sourceContainerLvmName,
- storagePoolVolumeAPIEndpointContainers, targetContainerLvmName,
- storagePoolVolumeAPIEndpointBackups, readonly, s.useThinpool)
- if err != nil {
- return fmt.Errorf("Error creating snapshot LV: %s", err)
- }
-
- defer func() {
- if tryUndo {
- s.ContainerBackupDelete(backup.Name())
- }
- }()
-
- tryUndo = false
-
- return nil
-}
-
// Copy a container on a storage pool that does use a thinpool.
func (s *storageLvm) copyContainerThinpool(target container, source container, readonly bool) error {
err := s.createSnapshotContainer(target, source, readonly)
diff --git a/lxd/storage_mock.go b/lxd/storage_mock.go
index ac3374512f..201ea1d065 100644
--- a/lxd/storage_mock.go
+++ b/lxd/storage_mock.go
@@ -194,18 +194,6 @@ func (s *storageMock) ContainerBackupCreate(backup backup, sourceContainer conta
return nil
}
-func (s *storageMock) ContainerBackupDelete(name string) error {
- return nil
-}
-
-func (s *storageMock) ContainerBackupRename(backup backup, newName string) error {
- return nil
-}
-
-func (s *storageMock) ContainerBackupDump(backup backup) ([]byte, error) {
- return nil, nil
-}
-
func (s *storageMock) ContainerBackupLoad(info backupInfo, data io.ReadSeeker) error {
return nil
}
diff --git a/lxd/storage_volumes_utils.go b/lxd/storage_volumes_utils.go
index 6c1916dfaa..931b49a8b6 100644
--- a/lxd/storage_volumes_utils.go
+++ b/lxd/storage_volumes_utils.go
@@ -33,7 +33,6 @@ const (
storagePoolVolumeAPIEndpointContainers string = "containers"
storagePoolVolumeAPIEndpointImages string = "images"
storagePoolVolumeAPIEndpointCustom string = "custom"
- storagePoolVolumeAPIEndpointBackups string = "backups"
)
var supportedVolumeTypes = []int{storagePoolVolumeTypeContainer, storagePoolVolumeTypeImage, storagePoolVolumeTypeCustom}
diff --git a/lxd/storage_zfs.go b/lxd/storage_zfs.go
index 4fe9ae6891..d96eaae42e 100644
--- a/lxd/storage_zfs.go
+++ b/lxd/storage_zfs.go
@@ -1,7 +1,6 @@
package main
import (
- "bytes"
"fmt"
"io"
"io/ioutil"
@@ -880,16 +879,6 @@ func (s *storageZfs) ContainerDelete(container container) error {
return err
}
- backups, err := container.Backups()
- if err != nil {
- return err
- }
-
- for _, backup := range backups {
- backupName := strings.Split(backup.Name(), "/")[1]
- s.ContainerBackupDelete(backupName)
- }
-
return nil
}
@@ -1420,17 +1409,6 @@ func (s *storageZfs) ContainerRename(container container, newName string) error
}
}
- backups, err := container.Backups()
- if err != nil {
- return err
- }
-
- for _, backup := range backups {
- backupName := strings.Split(backup.Name(), "/")[1]
- newName := fmt.Sprintf("%s/%s", newName, backupName)
- s.ContainerBackupRename(backup, newName)
- }
-
revert = false
logger.Debugf("Renamed ZFS storage volume for container \"%s\" from %s to %s", s.volume.Name, s.volume.Name, newName)
@@ -1784,9 +1762,7 @@ func (s *storageZfs) ContainerSnapshotCreateEmpty(snapshotContainer container) e
return nil
}
-func (s *storageZfs) doContainerOnlyBackup(backup backup, source container) error {
- logger.Debugf("Creating DIR storage volume for backup \"%s\" on storage pool \"%s\"", source.Name(), s.pool.Name)
-
+func (s *storageZfs) doContainerOnlyBackup(tmpPath string, backup backup, source container) error {
sourceIsSnapshot := source.IsSnapshot()
poolName := s.getOnDiskPoolName()
@@ -1816,14 +1792,8 @@ func (s *storageZfs) doContainerOnlyBackup(backup backup, source container) erro
}()
}
- // Create the path for the backup.
- baseMntPoint := getBackupMountPoint(s.pool.Name, backup.Name())
- err := os.MkdirAll(baseMntPoint, 0711)
- if err != nil {
- return err
- }
-
- backupFile := fmt.Sprintf("%s/%s", baseMntPoint, "container.bin")
+ // Dump the container to a file
+ backupFile := fmt.Sprintf("%s/%s", tmpPath, "container.bin")
f, err := os.OpenFile(backupFile, os.O_RDWR|os.O_CREATE, 0644)
if err != nil {
return err
@@ -1837,16 +1807,15 @@ func (s *storageZfs) doContainerOnlyBackup(backup backup, source container) erro
return err
}
- logger.Debugf("Created ZFS storage volume for backup \"%s\" on storage pool \"%s\"", source.Name(), s.pool.Name)
return nil
}
-func (s *storageZfs) doSnapshotBackup(backup backup, source container, parentSnapshot string) error {
+func (s *storageZfs) doSnapshotBackup(tmpPath string, backup backup, source container, parentSnapshot string) error {
sourceName := source.Name()
- baseMntPoint := getBackupMountPoint(s.pool.Name, backup.Name())
- targetBackupSnapshotsMntPoint := fmt.Sprintf("%s/snapshots", baseMntPoint)
- // create backup path for snapshots.
- err := os.MkdirAll(targetBackupSnapshotsMntPoint, 0711)
+ snapshotsPath := fmt.Sprintf("%s/snapshots", tmpPath)
+
+ // Create backup path for snapshots
+ err := os.MkdirAll(snapshotsPath, 0711)
if err != nil {
return err
}
@@ -1861,7 +1830,7 @@ func (s *storageZfs) doSnapshotBackup(backup backup, source container, parentSna
args = append(args, "-i", parentSnapshotDataset)
}
- backupFile := fmt.Sprintf("%s/%s.bin", targetBackupSnapshotsMntPoint, sourceSnapOnlyName)
+ backupFile := fmt.Sprintf("%s/%s.bin", snapshotsPath, sourceSnapOnlyName)
f, err := os.OpenFile(backupFile, os.O_RDWR|os.O_CREATE, 0644)
if err != nil {
return err
@@ -1873,22 +1842,16 @@ func (s *storageZfs) doSnapshotBackup(backup backup, source container, parentSna
return zfsSendCmd.Run()
}
-func (s *storageZfs) doContainerBackupCreateOptimized(backup backup, source container) error {
+func (s *storageZfs) doContainerBackupCreateOptimized(tmpPath string, backup backup, source container) error {
+ // Handle snapshots
snapshots, err := source.Snapshots()
if err != nil {
return err
}
- baseMntPoint := getBackupMountPoint(s.pool.Name, backup.Name())
if backup.containerOnly || len(snapshots) == 0 {
- err = s.doContainerOnlyBackup(backup, source)
+ err = s.doContainerOnlyBackup(tmpPath, backup, source)
} else {
- // create the path for the backup
- err = os.MkdirAll(baseMntPoint, 0711)
- if err != nil {
- return err
- }
-
prev := ""
prevSnapOnlyName := ""
for i, snap := range snapshots {
@@ -1903,13 +1866,13 @@ func (s *storageZfs) doContainerBackupCreateOptimized(backup backup, source cont
_, snapOnlyName, _ := containerGetParentAndSnapshotName(snap.Name())
prevSnapOnlyName = snapOnlyName
- err = s.doSnapshotBackup(backup, sourceSnapshot, prev)
+ err = s.doSnapshotBackup(tmpPath, backup, sourceSnapshot, prev)
if err != nil {
return err
}
}
- // send actual container
+ // Dump the container to a file
poolName := s.getOnDiskPoolName()
tmpSnapshotName := fmt.Sprintf("backup-%s", uuid.NewRandom().String())
err = zfsPoolVolumeSnapshotCreate(poolName, fmt.Sprintf("containers/%s", source.Name()), tmpSnapshotName)
@@ -1924,7 +1887,7 @@ func (s *storageZfs) doContainerBackupCreateOptimized(backup backup, source cont
args = append(args, "-i", parentSnapshotDataset)
}
- backupFile := fmt.Sprintf("%s/container.bin", baseMntPoint)
+ backupFile := fmt.Sprintf("%s/container.bin", tmpPath)
f, err := os.OpenFile(backupFile, os.O_RDWR|os.O_CREATE, 0644)
if err != nil {
return err
@@ -1938,6 +1901,7 @@ func (s *storageZfs) doContainerBackupCreateOptimized(backup backup, source cont
if err != nil {
return err
}
+
zfsPoolVolumeSnapshotDestroy(poolName, fmt.Sprintf("containers/%s", source.Name()), tmpSnapshotName)
}
if err != nil {
@@ -1947,49 +1911,50 @@ func (s *storageZfs) doContainerBackupCreateOptimized(backup backup, source cont
return nil
}
-func (s *storageZfs) doContainerBackupCreateVanilla(backup backup, source container) error {
- // Create the path for the backup.
- baseMntPoint := getBackupMountPoint(s.pool.Name, backup.Name())
- targetBackupContainerMntPoint := fmt.Sprintf("%s/container", baseMntPoint)
- err := os.MkdirAll(targetBackupContainerMntPoint, 0711)
- if err != nil {
- return err
- }
-
- snapshots, err := source.Snapshots()
- if err != nil {
- return err
- }
-
+func (s *storageZfs) doContainerBackupCreateVanilla(tmpPath string, backup backup, source container) error {
+ // Prepare for rsync
rsync := func(oldPath string, newPath string, bwlimit string) error {
output, err := rsyncLocalCopy(oldPath, newPath, bwlimit)
if err != nil {
- s.ContainerBackupDelete(backup.Name())
- return fmt.Errorf("failed to rsync: %s: %s", string(output), err)
+ return fmt.Errorf("Failed to rsync: %s: %s", string(output), err)
}
+
return nil
}
bwlimit := s.pool.Config["rsync.bwlimit"]
- if !backup.containerOnly && len(snapshots) > 0 {
- // /var/lib/lxd/storage-pools/<pool>/backups/<container>/snapshots
- targetBackupSnapshotsMntPoint := fmt.Sprintf("%s/snapshots", baseMntPoint)
- err = os.MkdirAll(targetBackupSnapshotsMntPoint, 0711)
+
+ // Handle snapshots
+ if !backup.containerOnly {
+ snapshotsPath := fmt.Sprintf("%s/snapshots", tmpPath)
+
+ // Retrieve the snapshots
+ snapshots, err := source.Snapshots()
if err != nil {
- logger.Errorf("Failed to create directory \"%s\": %s", targetBackupSnapshotsMntPoint, err)
return err
}
- logger.Debugf("Created directory \"%s\"", targetBackupSnapshotsMntPoint)
+
+ // Create the snapshot path
+ if len(snapshots) > 0 {
+ err = os.MkdirAll(snapshotsPath, 0711)
+ if err != nil {
+ return err
+ }
+ }
for _, snap := range snapshots {
+ _, snapName, _ := containerGetParentAndSnapshotName(snap.Name())
+
+ // Mount the snapshot to a usable path
_, err := s.ContainerSnapshotStart(snap)
if err != nil {
return err
}
snapshotMntPoint := getSnapshotMountPoint(s.pool.Name, snap.Name())
- _, snapName, _ := containerGetParentAndSnapshotName(snap.Name())
- target := fmt.Sprintf("%s/%s", targetBackupSnapshotsMntPoint, snapName)
+ target := fmt.Sprintf("%s/%s", snapshotsPath, snapName)
+
+ // Copy the snapshot
err = rsync(snapshotMntPoint, target, bwlimit)
s.ContainerSnapshotStop(snap)
if err != nil {
@@ -1998,7 +1963,7 @@ func (s *storageZfs) doContainerBackupCreateVanilla(backup backup, source contai
}
}
- // /var/lib/lxd/storage-pools/<pool>/containers/<container>
+ // Make a temporary copy of the container
containersPath := getContainerMountPoint(s.pool.Name, "")
tmpContainerMntPoint, err := ioutil.TempDir(containersPath, source.Name())
if err != nil {
@@ -2029,6 +1994,7 @@ func (s *storageZfs) doContainerBackupCreateVanilla(backup backup, source contai
}
defer zfsPoolVolumeDestroy(poolName, targetZfsDataset)
+ // Mount the temporary copy
if !shared.IsMountPoint(tmpContainerMntPoint) {
err = zfsMount(poolName, targetZfsDataset)
if err != nil {
@@ -2037,7 +2003,9 @@ func (s *storageZfs) doContainerBackupCreateVanilla(backup backup, source contai
defer zfsUmount(poolName, targetZfsDataset, tmpContainerMntPoint)
}
- err = rsync(tmpContainerMntPoint, targetBackupContainerMntPoint, bwlimit)
+ // Copy the container
+ containerPath := fmt.Sprintf("%s/container", tmpPath)
+ err = rsync(tmpContainerMntPoint, containerPath, bwlimit)
if err != nil {
return err
}
@@ -2046,9 +2014,7 @@ func (s *storageZfs) doContainerBackupCreateVanilla(backup backup, source contai
}
func (s *storageZfs) ContainerBackupCreate(backup backup, source container) error {
- logger.Debugf("Creating backup for container \"%s\" on storage pool \"%s\"", backup.Name(), s.pool.Name)
-
- // mount storage
+ // Start storage
ourStart, err := source.StorageStart()
if err != nil {
return err
@@ -2057,73 +2023,33 @@ func (s *storageZfs) ContainerBackupCreate(backup backup, source container) erro
defer source.StorageStop()
}
- if backup.optimizedStorage {
- return s.doContainerBackupCreateOptimized(backup, source)
- }
-
- return s.doContainerBackupCreateVanilla(backup, source)
-}
-
-func (s *storageZfs) ContainerBackupDelete(name string) error {
- logger.Debugf("Deleting ZFS storage volume for backup \"%s\" on storage pool \"%s\"", name, s.pool.Name)
- backupContainerMntPoint := getBackupMountPoint(s.pool.Name, name)
- if shared.PathExists(backupContainerMntPoint) {
- err := os.RemoveAll(backupContainerMntPoint)
- if err != nil {
- return err
- }
+ // Create a temporary path for the backup
+ tmpPath, err := ioutil.TempDir(shared.VarPath("backups"), "lxd_backup_")
+ if err != nil {
+ return err
}
+ defer os.RemoveAll(tmpPath)
- sourceContainerName, _, _ := containerGetParentAndSnapshotName(name)
- backupContainerPath := getBackupMountPoint(s.pool.Name, sourceContainerName)
- empty, _ := shared.PathIsEmpty(backupContainerPath)
- if empty == true {
- err := os.Remove(backupContainerPath)
+ // Generate the actual backup
+ if backup.optimizedStorage {
+ err = s.doContainerBackupCreateOptimized(tmpPath, backup, source)
if err != nil {
return err
}
- }
-
- logger.Debugf("Deleted ZFS storage volume for backup \"%s\" on storage pool \"%s\"", name, s.pool.Name)
- return nil
-}
-
-func (s *storageZfs) ContainerBackupRename(backup backup, newName string) error {
- logger.Debugf("Renaming ZFS storage volume for backup \"%s\" from %s to %s", backup.Name(), backup.Name(), newName)
- oldBackupMntPoint := getBackupMountPoint(s.pool.Name, backup.Name())
- newBackupMntPoint := getBackupMountPoint(s.pool.Name, newName)
-
- // Rename directory
- if shared.PathExists(oldBackupMntPoint) {
- err := os.Rename(oldBackupMntPoint, newBackupMntPoint)
+ } else {
+ err = s.doContainerBackupCreateVanilla(tmpPath, backup, source)
if err != nil {
return err
}
}
- logger.Debugf("Renamed ZFS storage volume for backup \"%s\" from %s to %s", backup.Name(), backup.Name(), newName)
- return nil
-}
-
-func (s *storageZfs) ContainerBackupDump(backup backup) ([]byte, error) {
- backupMntPoint := getBackupMountPoint(s.pool.Name, backup.Name())
- logger.Debugf("Taring up \"%s\" on storage pool \"%s\"", backupMntPoint, s.pool.Name)
-
- args := []string{"-cJf", "-", "--xattrs", "-C", backupMntPoint, "--transform", "s,^./,backup/,"}
- if backup.ContainerOnly() {
- // Exclude snapshots directory
- args = append(args, "--exclude", fmt.Sprintf("%s/snapshots", backup.Name()))
- }
- args = append(args, ".")
-
- var buffer bytes.Buffer
- err := shared.RunCommandWithFds(nil, &buffer, "tar", args...)
+ // Pack the backup
+ err = backupCreateTarball(tmpPath, backup)
if err != nil {
- return nil, err
+ return err
}
- logger.Debugf("Tared up \"%s\" on storage pool \"%s\"", backupMntPoint, s.pool.Name)
- return buffer.Bytes(), nil
+ return nil
}
func (s *storageZfs) doContainerBackupLoadOptimized(info backupInfo, data io.ReadSeeker) error {
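
With optimizedStorage the ZFS backend doesn't rsync a filesystem tree into the staging directory; instead doContainerOnlyBackup() and doSnapshotBackup() above write raw zfs send streams, container.bin for the container itself and snapshots/<name>.bin for each snapshot, with each snapshot stream sent incrementally against the previous one. A rough standalone sketch of writing one such stream with the zfs CLI; the helper name and dataset arguments are placeholders, not code from the patch:

// Sketch: dump a ZFS snapshot to a file, optionally as an incremental
// stream against a parent snapshot (what "-i" does for "zfs send").
func zfsSendToFileSketch(parent string, snapshot string, targetFile string) error {
	f, err := os.OpenFile(targetFile, os.O_RDWR|os.O_CREATE, 0644)
	if err != nil {
		return err
	}
	defer f.Close()

	args := []string{"send"}
	if parent != "" {
		// Only the delta between parent and snapshot ends up in the file.
		args = append(args, "-i", parent)
	}
	args = append(args, snapshot)

	cmd := exec.Command("zfs", args...)
	cmd.Stdout = f
	return cmd.Run()
}
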
diff --git a/lxd/sys/fs.go b/lxd/sys/fs.go
index 0f4e948e56..5905c433a0 100644
--- a/lxd/sys/fs.go
+++ b/lxd/sys/fs.go
@@ -40,18 +40,19 @@ func (s *OS) initDirs() error {
mode os.FileMode
}{
{s.VarDir, 0711},
+ {filepath.Join(s.VarDir, "backups"), 0700},
{s.CacheDir, 0700},
- {filepath.Join(s.VarDir, "database"), 0700},
{filepath.Join(s.VarDir, "containers"), 0711},
+ {filepath.Join(s.VarDir, "database"), 0700},
{filepath.Join(s.VarDir, "devices"), 0711},
{filepath.Join(s.VarDir, "devlxd"), 0755},
+ {filepath.Join(s.VarDir, "disks"), 0700},
{filepath.Join(s.VarDir, "images"), 0700},
{s.LogDir, 0700},
+ {filepath.Join(s.VarDir, "networks"), 0711},
{filepath.Join(s.VarDir, "security"), 0700},
{filepath.Join(s.VarDir, "shmounts"), 0711},
{filepath.Join(s.VarDir, "snapshots"), 0700},
- {filepath.Join(s.VarDir, "networks"), 0711},
- {filepath.Join(s.VarDir, "disks"), 0700},
{filepath.Join(s.VarDir, "storage-pools"), 0711},
}