[lxc-devel] [lxd/master] Add support for xfs/ext4 project quotas
stgraber on Github
lxc-bot at linuxcontainers.org
Thu Apr 11 18:37:15 UTC 2019
A non-text attachment was scrubbed...
Name: not available
Type: text/x-mailbox
Size: 498 bytes
Desc: not available
URL: <http://lists.linuxcontainers.org/pipermail/lxc-devel/attachments/20190411/168fbe75/attachment-0001.bin>
-------------- next part --------------
From 3b616ba52169a9dec20025f910db25ce4cbe1859 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?St=C3=A9phane=20Graber?= <stgraber at ubuntu.com>
Date: Thu, 11 Apr 2019 13:27:19 -0400
Subject: [PATCH 1/4] lxd/storage: Don't hardcode default project
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Signed-off-by: Stéphane Graber <stgraber at ubuntu.com>
---
lxd/storage_btrfs.go | 4 ++--
lxd/storage_ceph.go | 2 +-
lxd/storage_lvm.go | 2 +-
lxd/storage_zfs.go | 2 +-
4 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/lxd/storage_btrfs.go b/lxd/storage_btrfs.go
index 40be14a13e..0c9eeda306 100644
--- a/lxd/storage_btrfs.go
+++ b/lxd/storage_btrfs.go
@@ -1905,7 +1905,7 @@ func (s *storageBtrfs) doContainerBackupLoadOptimized(info backupInfo, data io.R
tmpContainerMntPoint := fmt.Sprintf("%s/.backup", unpackDir)
defer btrfsSubVolumesDelete(tmpContainerMntPoint)
- containerMntPoint = getContainerMountPoint("default", s.pool.Name, info.Name)
+ containerMntPoint = getContainerMountPoint(info.Project, s.pool.Name, info.Name)
err = s.btrfsPoolVolumesSnapshot(tmpContainerMntPoint, containerMntPoint, false, true)
if err != nil {
logger.Errorf("Failed to create btrfs snapshot \"%s\" of \"%s\": %s", tmpContainerMntPoint, containerMntPoint, err)
@@ -2861,7 +2861,7 @@ func (s *storageBtrfs) StorageEntitySetQuota(volumeType int, size int64, data in
switch volumeType {
case storagePoolVolumeTypeContainer:
c = data.(container)
- subvol = getContainerMountPoint("default", s.pool.Name, c.Name())
+ subvol = getContainerMountPoint(c.Project(), s.pool.Name, c.Name())
case storagePoolVolumeTypeCustom:
subvol = getStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
}
diff --git a/lxd/storage_ceph.go b/lxd/storage_ceph.go
index 23ce78067a..6c2c692e9f 100644
--- a/lxd/storage_ceph.go
+++ b/lxd/storage_ceph.go
@@ -2471,7 +2471,7 @@ func (s *storageCeph) StorageEntitySetQuota(volumeType int, size int64, data int
RBDDevPath, ret = getRBDMappedDevPath(s.ClusterName,
s.OSDPoolName, storagePoolVolumeTypeNameContainer,
s.volume.Name, true, s.UserName)
- mountpoint = getContainerMountPoint("default", s.pool.Name, ctName)
+ mountpoint = getContainerMountPoint(c.Project(), s.pool.Name, ctName)
volumeName = ctName
default:
RBDDevPath, ret = getRBDMappedDevPath(s.ClusterName,
diff --git a/lxd/storage_lvm.go b/lxd/storage_lvm.go
index 0c07f2c35e..778c114e52 100644
--- a/lxd/storage_lvm.go
+++ b/lxd/storage_lvm.go
@@ -2116,7 +2116,7 @@ func (s *storageLvm) StorageEntitySetQuota(volumeType int, size int64, data inte
ctLvmName := containerNameToLVName(ctName)
lvDevPath = getLvmDevPath("default", poolName, storagePoolVolumeAPIEndpointContainers, ctLvmName)
- mountpoint = getContainerMountPoint("default", s.pool.Name, ctName)
+ mountpoint = getContainerMountPoint(c.Project(), s.pool.Name, ctName)
default:
customLvmName := containerNameToLVName(s.volume.Name)
lvDevPath = getLvmDevPath("default", poolName, storagePoolVolumeAPIEndpointCustom, customLvmName)
diff --git a/lxd/storage_zfs.go b/lxd/storage_zfs.go
index 735c55de7d..442563ca26 100644
--- a/lxd/storage_zfs.go
+++ b/lxd/storage_zfs.go
@@ -2155,7 +2155,7 @@ func (s *storageZfs) ContainerBackupCreate(backup backup, source container) erro
func (s *storageZfs) doContainerBackupLoadOptimized(info backupInfo, data io.ReadSeeker, tarArgs []string) error {
containerName, _, _ := containerGetParentAndSnapshotName(info.Name)
- containerMntPoint := getContainerMountPoint("default", s.pool.Name, containerName)
+ containerMntPoint := getContainerMountPoint(info.Project, s.pool.Name, containerName)
err := createContainerMountpoint(containerMntPoint, containerPath(info.Name, false), info.Privileged)
if err != nil {
return err
From 13a310fda1d06b33fef6460a37d0cd173a9bfbf6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?St=C3=A9phane=20Graber?= <stgraber at ubuntu.com>
Date: Wed, 10 Apr 2019 23:01:35 -0400
Subject: [PATCH 2/4] lxd/quota: Add new quota package
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
This drives the projectquota feature available in recent kernels on top
of ext4 and xfs and will let us apply quotas on the directory backend.
Signed-off-by: Stéphane Graber <stgraber at ubuntu.com>
---
lxd/storage/quota/projectquota.go | 245 ++++++++++++++++++++++++++++++
1 file changed, 245 insertions(+)
create mode 100644 lxd/storage/quota/projectquota.go
diff --git a/lxd/storage/quota/projectquota.go b/lxd/storage/quota/projectquota.go
new file mode 100644
index 0000000000..5daeac1454
--- /dev/null
+++ b/lxd/storage/quota/projectquota.go
@@ -0,0 +1,245 @@
+package quota
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "strings"
+ "syscall"
+ "unsafe"
+
+ "github.com/lxc/lxd/shared"
+)
+
+/*
+#include <linux/fs.h>
+#include <linux/dqblk_xfs.h>
+#include <sys/ioctl.h>
+#include <sys/quota.h>
+#include <sys/types.h>
+#include <fcntl.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+int quota_supported(char *dev_path) {
+ struct if_dqinfo dqinfo;
+
+ return quotactl(QCMD(Q_GETINFO, PRJQUOTA), dev_path, 0, (caddr_t)&dqinfo);
+}
+
+int quota_get_usage(char *dev_path, uint32_t id) {
+ struct if_dqblk quota;
+
+ if (quotactl(QCMD(Q_GETQUOTA, PRJQUOTA), dev_path, id, (caddr_t)&quota) < 0) {
+ return -1;
+ }
+
+ return quota.dqb_curspace;
+}
+
+
+int quota_set(char *dev_path, uint32_t id, int hard_bytes) {
+ struct if_dqblk quota;
+ fs_disk_quota_t xfsquota;
+
+ if (quotactl(QCMD(Q_GETQUOTA, PRJQUOTA), dev_path, id, (caddr_t)&quota) < 0) {
+ return -1;
+ }
+
+ quota.dqb_bhardlimit = hard_bytes;
+ if (quotactl(QCMD(Q_SETQUOTA, PRJQUOTA), dev_path, id, (caddr_t)&quota) < 0) {
+ xfsquota.d_version = FS_DQUOT_VERSION;
+ xfsquota.d_id = id;
+ xfsquota.d_flags = FS_PROJ_QUOTA;
+ xfsquota.d_fieldmask = FS_DQ_BHARD;
+ xfsquota.d_blk_hardlimit = hard_bytes * 1024 / 512;
+
+ if (quotactl(QCMD(Q_XSETQLIM, PRJQUOTA), dev_path, id, (caddr_t)&xfsquota) < 0) {
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+int quota_set_path(char *path, uint32_t id) {
+ struct fsxattr attr;
+ int fd;
+ int ret;
+
+ fd = open(path, O_RDONLY | O_CLOEXEC);
+ if (fd < 0)
+ return -1;
+
+ ret = ioctl(fd, FS_IOC_FSGETXATTR, &attr);
+ if (ret < 0) {
+ return -1;
+ }
+
+ attr.fsx_xflags |= FS_XFLAG_PROJINHERIT;
+ attr.fsx_projid = id;
+
+ ret = ioctl(fd, FS_IOC_FSSETXATTR, &attr);
+ if (ret < 0) {
+ return -1;
+ }
+
+ return 0;
+}
+
+int32_t quota_get_path(char *path) {
+ struct fsxattr attr;
+ int fd;
+ int ret;
+
+ fd = open(path, O_RDONLY | O_CLOEXEC);
+ if (fd < 0)
+ return -1;
+
+ ret = ioctl(fd, FS_IOC_FSGETXATTR, &attr);
+ if (ret < 0) {
+ return -1;
+ }
+
+ return attr.fsx_projid;
+}
+
+*/
+import "C"
+
+var errNoDevice = fmt.Errorf("Couldn't find backing device for mountpoint")
+
+func devForPath(path string) (string, error) {
+ // Get major/minor
+ var stat syscall.Stat_t
+ err := syscall.Lstat(path, &stat)
+ if err != nil {
+ return "", err
+ }
+
+ devMajor := shared.Major(stat.Dev)
+ devMinor := shared.Minor(stat.Dev)
+
+ // Parse mountinfo for it
+ mountinfo, err := os.Open("/proc/self/mountinfo")
+ if err != nil {
+ return "", err
+ }
+ defer mountinfo.Close()
+
+ scanner := bufio.NewScanner(mountinfo)
+ for scanner.Scan() {
+ line := scanner.Text()
+
+ tokens := strings.Fields(line)
+ if len(tokens) < 5 {
+ continue
+ }
+
+ if tokens[2] == fmt.Sprintf("%d:%d", devMajor, devMinor) {
+ if shared.PathExists(tokens[len(tokens)-2]) {
+ return tokens[len(tokens)-2], nil
+ }
+ }
+ }
+
+ return "", errNoDevice
+}
+
+// Supported check if the given path supports project quotas
+func Supported(path string) (bool, error) {
+ // Get the backing device
+ devPath, err := devForPath(path)
+ if err != nil {
+ return false, err
+ }
+
+ // Call quotactl through CGo
+ cDevPath := C.CString(devPath)
+ defer C.free(unsafe.Pointer(cDevPath))
+
+ return C.quota_supported(cDevPath) == 0, nil
+}
+
+// GetProject returns the project quota ID for the given path
+func GetProject(path string) (uint32, error) {
+ // Call ioctl through CGo
+ cPath := C.CString(path)
+ defer C.free(unsafe.Pointer(cPath))
+
+ id := C.quota_get_path(cPath)
+ if id < 0 {
+ return 0, fmt.Errorf("Failed to get project from '%s'", path)
+ }
+
+ return uint32(id), nil
+}
+
+// SetProject sets the project quota ID for the given path
+func SetProject(path string, id uint32) error {
+ // Call ioctl through CGo
+ cPath := C.CString(path)
+ defer C.free(unsafe.Pointer(cPath))
+
+ if C.quota_set_path(cPath, C.uint(id)) != 0 {
+ return fmt.Errorf("Failed to set project id '%d' on '%s'", id, path)
+ }
+
+ return nil
+}
+
+// DeleteProject unsets the project id from the path and clears the quota for the project id
+func DeleteProject(path string, id uint32) error {
+ // Unset the project from the path
+ err := SetProject(path, 0)
+ if err != nil {
+ return err
+ }
+
+ // Unset the quota on the project
+ err = SetProjectQuota(path, id, 0)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// GetProjectUsage returns the current consumption
+func GetProjectUsage(path string, id uint32) (int64, error) {
+ // Get the backing device
+ devPath, err := devForPath(path)
+ if err != nil {
+ return -1, err
+ }
+
+ // Call quotactl through CGo
+ cDevPath := C.CString(devPath)
+ defer C.free(unsafe.Pointer(cDevPath))
+
+ size := C.quota_get_usage(cDevPath, C.uint(id))
+ if size < 0 {
+ return -1, fmt.Errorf("Failed to get project consumption for id '%d' on '%s'", id, path)
+ }
+
+ return int64(size), nil
+}
+
+// SetProjectQuota sets the quota on the project id
+func SetProjectQuota(path string, id uint32, bytes int64) error {
+ // Get the backing device
+ devPath, err := devForPath(path)
+ if err != nil {
+ return err
+ }
+
+ // Call quotactl through CGo
+ cDevPath := C.CString(devPath)
+ defer C.free(unsafe.Pointer(cDevPath))
+
+ if C.quota_set(cDevPath, C.uint(id), C.int(bytes/1024)) != 0 {
+ return fmt.Errorf("Failed to set project quota for id '%d' on '%s'", id, path)
+ }
+
+ return nil
+}
From cbeefb9af98a9761231adf6c4efc8d34dbe97ca8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?St=C3=A9phane=20Graber?= <stgraber at ubuntu.com>
Date: Thu, 11 Apr 2019 14:33:55 -0400
Subject: [PATCH 3/4] lxd/storage/dir: Add quota support
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Signed-off-by: Stéphane Graber <stgraber at ubuntu.com>
---
lxd/storage.go | 6 ++-
lxd/storage_dir.go | 122 +++++++++++++++++++++++++++++++++++++++++++--
2 files changed, 122 insertions(+), 6 deletions(-)
diff --git a/lxd/storage.go b/lxd/storage.go
index 41ea2e09e3..a9894198d1 100644
--- a/lxd/storage.go
+++ b/lxd/storage.go
@@ -309,8 +309,9 @@ func storageInit(s *state.State, project, poolName, volumeName string, volumeTyp
// Load the storage volume.
volume := &api.StorageVolume{}
- if volumeName != "" && volumeType >= 0 {
- _, volume, err = s.Cluster.StoragePoolNodeVolumeGetTypeByProject(project, volumeName, volumeType, poolID)
+ volumeID := int64(-1)
+ if volumeName != "" {
+ volumeID, volume, err = s.Cluster.StoragePoolNodeVolumeGetTypeByProject(project, volumeName, volumeType, poolID)
if err != nil {
return nil, err
}
@@ -338,6 +339,7 @@ func storageInit(s *state.State, project, poolName, volumeName string, volumeTyp
dir.poolID = poolID
dir.pool = pool
dir.volume = volume
+ dir.volumeID = volumeID
dir.s = s
err = dir.StoragePoolInit()
if err != nil {
diff --git a/lxd/storage_dir.go b/lxd/storage_dir.go
index b72a993101..b7635fd007 100644
--- a/lxd/storage_dir.go
+++ b/lxd/storage_dir.go
@@ -14,6 +14,7 @@ import (
"github.com/lxc/lxd/lxd/migration"
"github.com/lxc/lxd/lxd/state"
+ "github.com/lxc/lxd/lxd/storage/quota"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
"github.com/lxc/lxd/shared/ioprogress"
@@ -22,6 +23,8 @@ import (
type storageDir struct {
storageShared
+
+ volumeID int64
}
// Only initialize the minimal information we need about a given storage type.
@@ -349,6 +352,11 @@ func (s *storageDir) StoragePoolVolumeCreate() error {
return err
}
+ err = s.initQuota(storageVolumePath, s.volumeID)
+ if err != nil {
+ return err
+ }
+
logger.Infof("Created DIR storage volume \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
return nil
}
@@ -366,7 +374,12 @@ func (s *storageDir) StoragePoolVolumeDelete() error {
return nil
}
- err := os.RemoveAll(storageVolumePath)
+ err := s.deleteQuota(storageVolumePath, s.volumeID)
+ if err != nil {
+ return err
+ }
+
+ err = os.RemoveAll(storageVolumePath)
if err != nil {
return err
}
@@ -503,6 +516,11 @@ func (s *storageDir) ContainerCreate(container container) error {
deleteContainerMountpoint(containerMntPoint, container.Path(), s.GetStorageTypeName())
}()
+ err = s.initQuota(containerMntPoint, s.volumeID)
+ if err != nil {
+ return err
+ }
+
err = container.TemplateApply("create")
if err != nil {
return errors.Wrap(err, "Apply template")
@@ -542,6 +560,11 @@ func (s *storageDir) ContainerCreateFromImage(container container, imageFingerpr
s.ContainerDelete(container)
}()
+ err = s.initQuota(containerMntPoint, s.volumeID)
+ if err != nil {
+ return err
+ }
+
imagePath := shared.VarPath("images", imageFingerprint)
err = unpackImage(imagePath, containerMntPoint, storageTypeDir, s.s.OS.RunningInUserNS, nil)
if err != nil {
@@ -580,6 +603,12 @@ func (s *storageDir) ContainerDelete(container container) error {
// ${POOL}/containers/<container_name>
containerName := container.Name()
containerMntPoint := getContainerMountPoint(container.Project(), s.pool.Name, containerName)
+
+ err = s.deleteQuota(containerMntPoint, s.volumeID)
+ if err != nil {
+ return err
+ }
+
if shared.PathExists(containerMntPoint) {
err := os.RemoveAll(containerMntPoint)
if err != nil {
@@ -633,6 +662,11 @@ func (s *storageDir) copyContainer(target container, source container) error {
return err
}
+ err = s.initQuota(targetContainerMntPoint, s.volumeID)
+ if err != nil {
+ return err
+ }
+
bwlimit := s.pool.Config["rsync.bwlimit"]
output, err := rsyncLocalCopy(sourceContainerMntPoint, targetContainerMntPoint, bwlimit)
if err != nil {
@@ -867,8 +901,21 @@ func (s *storageDir) ContainerRestore(container container, sourceContainer conta
return nil
}
-func (s *storageDir) ContainerGetUsage(container container) (int64, error) {
- return -1, fmt.Errorf("The directory container backend doesn't support quotas")
+func (s *storageDir) ContainerGetUsage(c container) (int64, error) {
+ path := getContainerMountPoint(c.Project(), s.pool.Name, c.Name())
+
+ ok, err := quota.Supported(path)
+ if err != nil || !ok {
+ return -1, fmt.Errorf("The backing filesystem doesn't support quotas")
+ }
+
+ projectID := uint32(s.volumeID + 10000)
+ size, err := quota.GetProjectUsage(path, projectID)
+ if err != nil {
+ return -1, err
+ }
+
+ return size, nil
}
func (s *storageDir) ContainerSnapshotCreate(snapshotContainer container, sourceContainer container) error {
@@ -1264,7 +1311,69 @@ func (s *storageDir) MigrationSink(conn *websocket.Conn, op *operation, args Mig
}
func (s *storageDir) StorageEntitySetQuota(volumeType int, size int64, data interface{}) error {
- logger.Warnf("Skipping setting disk quota for '%s' as DIR backend doesn't support them", s.volume.Name)
+ var path string
+ switch volumeType {
+ case storagePoolVolumeTypeContainer:
+ c := data.(container)
+ path = getContainerMountPoint(c.Project(), s.pool.Name, c.Name())
+ case storagePoolVolumeTypeCustom:
+ path = getStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
+ }
+
+ ok, err := quota.Supported(path)
+ if err != nil || !ok {
+ logger.Warnf("Skipping setting disk quota for '%s' as the underlying filesystem doesn't support them", s.volume.Name)
+ return nil
+ }
+
+ projectID := uint32(s.volumeID + 10000)
+ err = quota.SetProjectQuota(path, projectID, size)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (s *storageDir) initQuota(path string, id int64) error {
+ if s.volumeID == 0 {
+ return fmt.Errorf("Missing volume ID")
+ }
+
+ ok, err := quota.Supported(path)
+ if err != nil || !ok {
+ return nil
+ }
+
+ projectID := uint32(s.volumeID + 10000)
+ err = quota.SetProject(path, projectID)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (s *storageDir) deleteQuota(path string, id int64) error {
+ if s.volumeID == 0 {
+ return fmt.Errorf("Missing volume ID")
+ }
+
+ ok, err := quota.Supported(path)
+ if err != nil || !ok {
+ return nil
+ }
+
+ err = quota.SetProject(path, 0)
+ if err != nil {
+ return err
+ }
+
+ projectID := uint32(s.volumeID + 10000)
+ err = quota.SetProjectQuota(path, projectID, 0)
+ if err != nil {
+ return err
+ }
return nil
}
@@ -1471,6 +1580,11 @@ func (s *storageDir) copyVolume(sourcePool string, source string, target string)
return err
}
+ err = s.initQuota(dstMountPoint, s.volumeID)
+ if err != nil {
+ return err
+ }
+
bwlimit := s.pool.Config["rsync.bwlimit"]
_, err = rsyncLocalCopy(srcMountPoint, dstMountPoint, bwlimit)
From 616ee4ed6b771b978898d99fde65bc700169618f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?St=C3=A9phane=20Graber?= <stgraber at ubuntu.com>
Date: Thu, 11 Apr 2019 14:36:55 -0400
Subject: [PATCH 4/4] doc: Update storage documentation
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Signed-off-by: Stéphane Graber <stgraber at ubuntu.com>
---
doc/storage.md | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/doc/storage.md b/doc/storage.md
index ab2a87c448..2b4f287133 100644
--- a/doc/storage.md
+++ b/doc/storage.md
@@ -69,7 +69,7 @@ Block based | no | no | yes | no |
Instant cloning | no | yes | yes | yes | yes
Storage driver usable inside a container | yes | yes | no | no | no
Restore from older snapshots (not latest) | yes | yes | yes | no | yes
-Storage quotas | no | yes | yes | yes | no
+Storage quotas | yes(\*) | yes | yes | yes | no
## Recommended setup
The two best options for use with LXD are ZFS and btrfs.
@@ -162,6 +162,8 @@ This also means that access to cached data will not be affected by the limit.
- While this backend is fully functional, it's also much slower than
all the others due to it having to unpack images or do instant copies of
containers, snapshots and images.
+ - Quotas are supported with the directory backend when running on
+ either ext4 or XFS with project quotas enabled at the filesystem level.
#### The following commands can be used to create directory storage pools
More information about the lxc-devel
mailing list