[lxc-devel] [lxd/master] tests: Reduce ceph pg_num down to 1

stgraber on Github lxc-bot at linuxcontainers.org
Wed May 30 01:42:56 UTC 2018


From 9aa110d64ad7862609996037c3ead4b717857b6e Mon Sep 17 00:00:00 2001
From: Stéphane Graber <stgraber at ubuntu.com>
Date: Tue, 29 May 2018 21:41:28 -0400
Subject: [PATCH] tests: Reduce ceph pg_num down to 1

Signed-off-by: Stéphane Graber <stgraber at ubuntu.com>
---
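Note for reviewers (not part of the commit message): pg_num sets the
number of placement groups Ceph creates for a pool, and each pg adds
per-OSD overhead, so the tiny single-OSD clusters the test suite spins
up are fastest with the minimum of 1. A minimal sketch for confirming
the resulting pg count, assuming a cluster named by the suite's
${LXD_CEPH_CLUSTER} variable and an illustrative pool name:

    # Query the pg count of a pool (pool name below is hypothetical)
    ceph --cluster "${LXD_CEPH_CLUSTER}" osd pool get "lxdtest-example" pg_num
    # Expected output after this change: pg_num: 1
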
 test/backends/ceph.sh                        | 2 +-
 test/includes/clustering.sh                  | 2 +-
 test/suites/clustering.sh                    | 2 +-
 test/suites/storage_driver_ceph.sh           | 6 +++---
 test/suites/storage_local_volume_handling.sh | 4 ++--
 5 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/test/backends/ceph.sh b/test/backends/ceph.sh
index 7cdaa2134..77b4ee363 100644
--- a/test/backends/ceph.sh
+++ b/test/backends/ceph.sh
@@ -15,7 +15,7 @@ ceph_configure() {
 
   echo "==> Configuring CEPH backend in ${LXD_DIR}"
 
-  lxc storage create "lxdtest-$(basename "${LXD_DIR}")" ceph volume.size=25MB ceph.osd.pg_num=8
+  lxc storage create "lxdtest-$(basename "${LXD_DIR}")" ceph volume.size=25MB ceph.osd.pg_num=1
   lxc profile device add default root disk path="/" pool="lxdtest-$(basename "${LXD_DIR}")"
 }
 
diff --git a/test/includes/clustering.sh b/test/includes/clustering.sh
index dc800b6e1..47a9d26e7 100644
--- a/test/includes/clustering.sh
+++ b/test/includes/clustering.sh
@@ -182,7 +182,7 @@ EOF
   config:
     source: lxdtest-$(basename "${TEST_DIR}")
     volume.size: 25GB
-    ceph.osd.pg_num: 8
+    ceph.osd.pg_num: 1
 EOF
     fi
     cat >> "${LXD_DIR}/preseed.yaml" <<EOF
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 22fcfc0c6..3a4f52a38 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -404,7 +404,7 @@ test_clustering_storage() {
   if [ "${driver}" = "lvm" ]; then
       LXD_DIR="${LXD_TWO_DIR}" lxc storage create pool1 "${driver}" volume.size=25MB
   elif [ "${driver}" = "ceph" ]; then
-      LXD_DIR="${LXD_TWO_DIR}" lxc storage create pool1 "${driver}" volume.size=25MB ceph.osd.pg_num=8
+      LXD_DIR="${LXD_TWO_DIR}" lxc storage create pool1 "${driver}" volume.size=25MB ceph.osd.pg_num=1
   else
       LXD_DIR="${LXD_TWO_DIR}" lxc storage create pool1 "${driver}"
   fi
diff --git a/test/suites/storage_driver_ceph.sh b/test/suites/storage_driver_ceph.sh
index b4211e693..174269642 100644
--- a/test/suites/storage_driver_ceph.sh
+++ b/test/suites/storage_driver_ceph.sh
@@ -19,7 +19,7 @@ test_storage_driver_ceph() {
     fi
 
     # shellcheck disable=SC1009
-    lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool1" ceph volume.size=25MB ceph.osd.pg_num=8
+    lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool1" ceph volume.size=25MB ceph.osd.pg_num=1
 
     # Set default storage pool for image import.
     lxc profile device add default root disk path="/" pool="lxdtest-$(basename "${LXD_DIR}")-pool1"
@@ -31,7 +31,7 @@ test_storage_driver_ceph() {
     ceph --cluster "${LXD_CEPH_CLUSTER}" osd pool create "lxdtest-$(basename "${LXD_DIR}")-existing-osd-pool" 32
 
     # Let LXD use an already existing osd pool.
-    lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool2" ceph source="lxdtest-$(basename "${LXD_DIR}")-existing-osd-pool" volume.size=25MB ceph.osd.pg_num=8
+    lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool2" ceph source="lxdtest-$(basename "${LXD_DIR}")-existing-osd-pool" volume.size=25MB ceph.osd.pg_num=1
 
     # Test that no invalid ceph storage pool configuration keys can be set.
     ! lxc storage create "lxdtest-$(basename "${LXD_DIR}")-invalid-ceph-pool-config" ceph lvm.thinpool_name=bla
@@ -39,7 +39,7 @@ test_storage_driver_ceph() {
     ! lxc storage create "lxdtest-$(basename "${LXD_DIR}")-invalid-ceph-pool-config" ceph lvm.vg_name=bla
 
     # Test that all valid ceph storage pool configuration keys can be set.
-    lxc storage create "lxdtest-$(basename "${LXD_DIR}")-valid-ceph-pool-config" ceph volume.block.filesystem=ext4 volume.block.mount_options=discard volume.size=2GB ceph.rbd.clone_copy=true ceph.osd.pg_num=8
+    lxc storage create "lxdtest-$(basename "${LXD_DIR}")-valid-ceph-pool-config" ceph volume.block.filesystem=ext4 volume.block.mount_options=discard volume.size=2GB ceph.rbd.clone_copy=true ceph.osd.pg_num=1
     lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-valid-ceph-pool-config"
 
     # Muck around with some containers on various pools.
diff --git a/test/suites/storage_local_volume_handling.sh b/test/suites/storage_local_volume_handling.sh
index 0c7dfdcc8..2a395d5bf 100644
--- a/test/suites/storage_local_volume_handling.sh
+++ b/test/suites/storage_local_volume_handling.sh
@@ -21,7 +21,7 @@ test_storage_local_volume_handling() {
     fi
 
     if storage_backend_available "ceph"; then
-      lxc storage create "lxdtest-$(basename "${LXD_DIR}")-ceph" ceph volume.size=25MB ceph.osd.pg_num=8
+      lxc storage create "lxdtest-$(basename "${LXD_DIR}")-ceph" ceph volume.size=25MB ceph.osd.pg_num=1
     fi
 
     lxc storage create "lxdtest-$(basename "${LXD_DIR}")-dir" dir
@@ -58,7 +58,7 @@ test_storage_local_volume_handling() {
         fi
 
         if [ "$driver" = "ceph" ]; then
-          pool_opts="volume.size=25MB ceph.osd.pg_num=8"
+          pool_opts="volume.size=25MB ceph.osd.pg_num=1"
         fi
 
         if [ "$driver" = "lvm" ]; then

