[lxc-devel] [lxd/master] Make use of the new cluster join API in lxd init
freeekanayaka on Github
lxc-bot at linuxcontainers.org
Fri Aug 3 16:45:29 UTC 2018
From f0544fa39d392fce7ba8972ffeeefcb75f53f701 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 3 Aug 2018 16:24:19 +0000
Subject: [PATCH] Make use of the new cluster join API in lxd init
This changes the format of the preseed YAML, but the code handling the old
format is still in place, so existing preseed files will continue to work
(although the new format is now printed when you dump the YAML config at the
end of interactive mode).
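
For reference, a preseed that joins an existing cluster now only needs the
cluster section. A rough sketch with placeholder values, mirroring the updated
doc/clustering.md example further down in this patch:

    cluster:
      enabled: true
      server_name: node2
      server_address: 10.55.60.155:8443
      cluster_address: 10.55.60.171:8443
      cluster_certificate: "<certificate of the target bootstrap node>"
      cluster_password: sekret
      member_config:
      - entity: storage-pool
        name: default
        key: source
        value: ""
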
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
client/lxd_cluster.go | 2 +-
doc/clustering.md | 45 ++++++------------
lxd/main_init.go | 14 ++++++
lxd/main_init_interactive.go | 89 +++++++++---------------------------
test/includes/clustering.sh | 53 +++++++--------------
5 files changed, 69 insertions(+), 134 deletions(-)
diff --git a/client/lxd_cluster.go b/client/lxd_cluster.go
index 580d76c65e..824d94a7f0 100644
--- a/client/lxd_cluster.go
+++ b/client/lxd_cluster.go
@@ -23,7 +23,7 @@ func (r *ProtocolLXD) GetCluster() (*api.Cluster, string, error) {
return cluster, etag, nil
}
-// UpdateCluster requests to bootstrap a new cluster
+// UpdateCluster requests to bootstrap a new cluster or join an existing one.
func (r *ProtocolLXD) UpdateCluster(cluster api.ClusterPut, ETag string) (Operation, error) {
if !r.HasExtension("clustering") {
return nil, fmt.Errorf("The server is missing the required \"clustering\" API extension")
diff --git a/doc/clustering.md b/doc/clustering.md
index bc353d1338..436b141a59 100644
--- a/doc/clustering.md
+++ b/doc/clustering.md
@@ -87,42 +87,22 @@ cluster:
Then run `cat <preseed-file> | lxd init --preseed` and your first node
should be bootstrapped.
-Now create a bootstrap file for another node. Be sure to specify the
-address and certificate of the target bootstrap node. To create a
-YAML-compatible entry for the `<cert>` key you can use a command like
-`sed ':a;N;$!ba;s/\n/\n\n/g' /var/lib/lxd/server.crt`, which you have to
-run on the bootstrap node.
+Now create a bootstrap file for another node. You only need to fill in the
+``cluster`` section with data and config values that are specific to the joining
+node.
+
+Be sure to include the address and certificate of the target bootstrap node. To
+create a YAML-compatible entry for the ``cluster_certificate`` key you can use a
+command like `sed ':a;N;$!ba;s/\n/\n\n/g' /var/lib/lxd/server.crt`, which you
+have to run on the bootstrap node.
For example:
```yaml
-config:
- core.https_address: 10.55.60.155:8443
- images.auto_update_interval: 15
-storage_pools:
-- name: default
- driver: dir
-networks:
-- name: lxdbr0
- type: bridge
- config:
- ipv4.address: 192.168.100.14/24
- ipv6.address: none
-profiles:
-- name: default
- devices:
- root:
- path: /
- pool: default
- type: disk
- eth0:
- name: eth0
- nictype: bridged
- parent: lxdbr0
- type: nic
cluster:
- server_name: node2
enabled: true
+ server_name: node2
+ server_address: 10.55.60.155:8443
cluster_address: 10.55.60.171:8443
cluster_certificate: "-----BEGIN CERTIFICATE-----
@@ -135,6 +115,11 @@ opyQ1VRpAg2sV2C4W8irbNqeUsTeZZxhLqp4vNOXXBBrSqUCdPu1JXADV0kavg1l
-----END CERTIFICATE-----
"
cluster_password: sekret
+ member_config:
+ - entity: storage-pool
+ name: default
+ key: source
+ value: ""
```
## Managing a cluster
diff --git a/lxd/main_init.go b/lxd/main_init.go
index 72c826154c..302657da10 100644
--- a/lxd/main_init.go
+++ b/lxd/main_init.go
@@ -124,6 +124,20 @@ func (c *cmdInit) Run(cmd *cobra.Command, args []string) error {
}
}
+ // Detect if the user has chosen to join a cluster using the new
+ // cluster join API format, and use the dedicated API if so.
+ if config.Cluster != nil && config.Cluster.ClusterAddress != "" && config.Cluster.ServerAddress != "" {
+ op, err := d.UpdateCluster(config.Cluster.ClusterPut, "")
+ if err != nil {
+ return errors.Wrap(err, "Failed to join cluster")
+ }
+ err = op.Wait()
+ if err != nil {
+ return errors.Wrap(err, "Failed to join cluster")
+ }
+ return nil
+ }
+
revert, err := initDataNodeApply(d, config.Node)
if err != nil {
revert()
diff --git a/lxd/main_init_interactive.go b/lxd/main_init_interactive.go
index 5bdcc5b95f..de51bbd69d 100644
--- a/lxd/main_init_interactive.go
+++ b/lxd/main_init_interactive.go
@@ -74,7 +74,19 @@ func (c *cmdInit) RunInteractive(cmd *cobra.Command, args []string, d lxd.Contai
// Print the YAML
if cli.AskBool("Would you like a YAML \"lxd init\" preseed to be printed? (yes/no) [default=no]: ", "no") {
- out, err := yaml.Marshal(config)
+ var object cmdInitData
+
+ // If the user has chosen to join an existing cluster, print
+ // only YAML for the cluster section, which is the only
+ // relevant one. Otherwise print the regular config.
+ if config.Cluster != nil && config.Cluster.ClusterAddress != "" {
+ object = cmdInitData{}
+ object.Cluster = config.Cluster
+ } else {
+ object = config
+ }
+
+ out, err := yaml.Marshal(object)
if err != nil {
return nil, errors.Wrap(err, "Failed to render the config")
}
@@ -107,6 +119,7 @@ func (c *cmdInit) askClustering(config *cmdInitData, d lxd.ContainerServer) erro
if cli.AskBool("Are you joining an existing cluster? (yes/no) [default=no]: ", "no") {
// Existing cluster
+ config.Cluster.ServerAddress = serverAddress
for {
// Cluster URL
clusterAddress := cli.AskString("IP address or FQDN of an existing cluster node: ", "", nil)
@@ -171,77 +184,19 @@ func (c *cmdInit) askClustering(config *cmdInitData, d lxd.ContainerServer) erro
return err
}
- // Prompt for storage config
- targetPools, err := client.GetStoragePools()
+ // Get the list of required member config keys.
+ cluster, _, err := client.GetCluster()
if err != nil {
- return errors.Wrap(err, "Failed to retrieve storage pools from the cluster")
+ return errors.Wrap(err, "Failed to retrieve cluster information")
}
- config.Node.StoragePools = []api.StoragePoolsPost{}
- for _, pool := range targetPools {
- // Skip pending pools
- if pool.Status == "PENDING" {
- continue
- }
-
- // Skip ceph pools since they have no node-specific key
- if pool.Driver == "ceph" {
- continue
- }
-
- // Setup the new local pool
- newPool := api.StoragePoolsPost{
- StoragePoolPut: pool.StoragePoolPut,
- Driver: pool.Driver,
- Name: pool.Name,
- }
-
- // Delete config keys that are automatically populated by LXD
- delete(newPool.Config, "volatile.initial_source")
- delete(newPool.Config, "zfs.pool_name")
-
- // Only ask for the node-specific "source" key if it's defined in the target node
- if pool.Config["source"] != "" {
- // Dummy validator for allowing empty strings
- validator := func(string) error { return nil }
- newPool.Config["source"] = cli.AskString(
- fmt.Sprintf(`Choose the local disk or dataset for storage pool "%s" (empty for loop disk): `, pool.Name), "", validator)
- }
-
- config.Node.StoragePools = append(config.Node.StoragePools, newPool)
- }
-
- // Prompt for network config
- targetNetworks, err := client.GetNetworks()
- if err != nil {
- return errors.Wrap(err, "Failed to retrieve networks from the cluster")
+ validator := func(string) error { return nil }
+ for i, config := range cluster.MemberConfig {
+ question := fmt.Sprintf("Choose %s: ", config.Description)
+ cluster.MemberConfig[i].Value = cli.AskString(question, "", validator)
}
- config.Node.Networks = []api.NetworksPost{}
- for _, network := range targetNetworks {
- // Skip not-managed or pending networks
- if !network.Managed || network.Status == "PENDING" {
- continue
- }
-
- // Setup the new local network
- newNetwork := api.NetworksPost{
- NetworkPut: network.NetworkPut,
- Managed: true,
- Name: network.Name,
- Type: network.Type,
- }
-
- // Only ask for the node-specific "bridge.external_interfaces" key if it's defined in the target node
- if network.Config["bridge.external_interfaces"] != "" {
- // Dummy validator for allowing empty strings
- validator := func(string) error { return nil }
- newNetwork.Config["bridge.external_interfaces"] = cli.AskString(
- fmt.Sprintf(`Choose the local network interface to connect to network "%s" (empty for none): `, network.Name), "", validator)
- }
-
- config.Node.Networks = append(config.Node.Networks, newNetwork)
- }
+ config.Cluster.MemberConfig = cluster.MemberConfig
} else {
// Password authentication
if cli.AskBool("Setup password authentication on the cluster? (yes/no) [default=yes]: ", "yes") {
diff --git a/test/includes/clustering.sh b/test/includes/clustering.sh
index 47a9d26e74..8da0034503 100644
--- a/test/includes/clustering.sh
+++ b/test/includes/clustering.sh
@@ -228,53 +228,34 @@ spawn_lxd_and_join_cluster() {
set -e
cat > "${LXD_DIR}/preseed.yaml" <<EOF
-config:
- core.https_address: 10.1.1.10${index}:8443
- images.auto_update_interval: 0
+cluster:
+ enabled: true
+ server_name: node${index}
+ server_address: 10.1.1.10${index}:8443
+ cluster_address: 10.1.1.10${target}:8443
+ cluster_certificate: "$cert"
+ cluster_password: sekret
+ member_config:
EOF
# Declare the pool only if the driver is not ceph, because
# the ceph pool doesn't need to be created on the joining
# node (it's shared with the bootstrap one).
if [ "${driver}" != "ceph" ]; then
cat >> "${LXD_DIR}/preseed.yaml" <<EOF
-storage_pools:
-- name: data
- driver: $driver
-EOF
- if [ "${driver}" = "btrfs" ]; then
- cat >> "${LXD_DIR}/preseed.yaml" <<EOF
- config:
- size: 100GB
+ - entity: storage-pool
+ name: data
+ key: source
+ value: ""
EOF
- fi
if [ "${driver}" = "zfs" ]; then
cat >> "${LXD_DIR}/preseed.yaml" <<EOF
- config:
- size: 100GB
- zfs.pool_name: lxdtest-$(basename "${TEST_DIR}")-${ns}
-EOF
- fi
- if [ "${driver}" = "lvm" ]; then
- cat >> "${LXD_DIR}/preseed.yaml" <<EOF
- config:
- volume.size: 25MB
+ - entity: storage-pool
+ name: data
+ key: zfs.pool_name
+ value: lxdtest-$(basename "${TEST_DIR}")-${ns}
EOF
fi
fi
- cat >> "${LXD_DIR}/preseed.yaml" <<EOF
-networks:
-- name: $bridge
- type: bridge
- config:
- ipv4.address: none
- ipv6.address: none
-cluster:
- server_name: node${index}
- enabled: true
- cluster_address: 10.1.1.10${target}:8443
- cluster_certificate: "$cert"
- cluster_password: sekret
-EOF
- lxd init --preseed < "${LXD_DIR}/preseed.yaml"
+ lxd init --preseed < "${LXD_DIR}/preseed.yaml"
)
}