[lxc-devel] [lxd/master] Properly validate the server configuration keys
stgraber on Github
lxc-bot at linuxcontainers.org
Tue Apr 26 21:24:41 UTC 2016
From 0aec2538c40b8e5373524b79b82de75a07a5fa6c Mon Sep 17 00:00:00 2001
From: Stéphane Graber <stgraber at ubuntu.com>
Date: Tue, 26 Apr 2016 14:43:52 -0400
Subject: [PATCH] Properly validate the server configuration keys
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Closes #1939
Signed-off-by: Stéphane Graber <stgraber at ubuntu.com>
---
lxd/api_1.0.go | 113 ++++--------------
lxd/certificates.go | 2 +-
lxd/containers_post.go | 5 +-
lxd/daemon.go | 247 +++++++-------------------------------
lxd/daemon_config.go | 319 +++++++++++++++++++++++++++++++++++++++++++++++++
lxd/db_images.go | 2 +-
lxd/db_update.go | 5 +-
lxd/images.go | 32 +----
lxd/main.go | 6 +-
lxd/storage_lvm.go | 143 ++++++----------------
lxd/storage_zfs.go | 33 +----
11 files changed, 443 insertions(+), 464 deletions(-)
create mode 100644 lxd/daemon_config.go
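Before the per-file diffs, here is a minimal, self-contained sketch of the scheme the patch introduces: each server key is described by a struct with a type, a default and optional validation hooks, and unknown keys are rejected up front. Names and defaults are taken from lxd/daemon_config.go below; the standalone wrapper, the stringInSlice helper and the main function are illustrative only, not part of the patch.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// Each server key is described by a struct carrying its type, default
// value and an optional validator, instead of a flat switch statement.
type configKey struct {
	valueType    string
	defaultValue string
	currentValue string

	validator func(key string, value string) error
}

func (k *configKey) Get() string {
	if k.currentValue == "" {
		return k.defaultValue
	}
	return k.currentValue
}

func (k *configKey) GetInt64() int64 {
	ret, _ := strconv.ParseInt(k.Get(), 10, 64)
	return ret
}

func (k *configKey) Set(key string, value string) error {
	// Unsetting a key needs no validation (the default takes over).
	if value == "" {
		k.currentValue = ""
		return nil
	}

	// Type validation happens before the value is ever stored.
	if k.valueType == "int" {
		if _, err := strconv.ParseInt(value, 10, 64); err != nil {
			return err
		}
	}
	if k.valueType == "bool" && !stringInSlice(strings.ToLower(value),
		[]string{"true", "false", "1", "0", "yes", "no"}) {
		return fmt.Errorf("Invalid value for a boolean: %s", value)
	}

	// Per-key validation hook (e.g. checking that an LVM VG exists).
	if k.validator != nil {
		if err := k.validator(key, value); err != nil {
			return err
		}
	}

	k.currentValue = value
	return nil
}

func stringInSlice(needle string, haystack []string) bool {
	for _, entry := range haystack {
		if entry == needle {
			return true
		}
	}
	return false
}

func main() {
	config := map[string]*configKey{
		"images.auto_update_interval": {valueType: "int", defaultValue: "6"},
	}

	// Unknown keys are rejected up front instead of being stored blindly.
	if _, ok := config["images.bogus_key"]; !ok {
		fmt.Println("Bad server config key: 'images.bogus_key'")
	}

	key := config["images.auto_update_interval"]
	fmt.Println(key.Set("images.auto_update_interval", "soon")) // parse error
	fmt.Println(key.Set("images.auto_update_interval", "12"))   // <nil>
	fmt.Println(key.GetInt64())                                 // 12
}
```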
diff --git a/lxd/api_1.0.go b/lxd/api_1.0.go
index db3a8fb..6b20078 100644
--- a/lxd/api_1.0.go
+++ b/lxd/api_1.0.go
@@ -5,6 +5,7 @@ import (
"fmt"
"net/http"
"os"
+ "reflect"
"syscall"
"gopkg.in/lxc/go-lxc.v2"
@@ -125,23 +126,7 @@ func api10Get(d *Daemon, r *http.Request) Response {
body["environment"] = env
body["public"] = false
-
- serverConfig, err := d.ConfigValuesGet()
- if err != nil {
- return InternalError(err)
- }
-
- config := shared.Jmap{}
-
- for key, value := range serverConfig {
- if key == "core.trust_password" {
- config[key] = true
- } else {
- config[key] = value
- }
- }
-
- body["config"] = config
+ body["config"] = daemonConfigRender()
} else {
body["auth"] = "untrusted"
body["public"] = false
@@ -166,6 +151,14 @@ func api10Put(d *Daemon, r *http.Request) Response {
return BadRequest(err)
}
+ // Deal with special keys
+ for k, v := range req.Config {
+ config := daemonConfig[k]
+ if config.hiddenValue && v == true {
+ req.Config[k] = oldConfig[k]
+ }
+ }
+
// Diff the configs
changedConfig := map[string]interface{}{}
for key, value := range oldConfig {
@@ -180,84 +173,26 @@ func api10Put(d *Daemon, r *http.Request) Response {
}
}
- for key, value := range changedConfig {
- if value == nil {
- value = ""
+ for key, valueRaw := range changedConfig {
+ if valueRaw == nil {
+ valueRaw = ""
}
- if !d.ConfigKeyIsValid(key) {
- return BadRequest(fmt.Errorf("Bad server config key: '%s'", key))
+ s := reflect.ValueOf(valueRaw)
+ if !s.IsValid() || s.Kind() != reflect.String {
+ return BadRequest(fmt.Errorf("Invalid value type for '%s'", key))
}
- if key == "core.trust_password" {
- if value == true {
- continue
- }
-
- err := d.PasswordSet(value.(string))
- if err != nil {
- return InternalError(err)
- }
- } else if key == "storage.lvm_vg_name" {
- err := storageLVMSetVolumeGroupNameConfig(d, value.(string))
- if err != nil {
- return InternalError(err)
- }
- if err = d.SetupStorageDriver(); err != nil {
- return InternalError(err)
- }
- } else if key == "storage.lvm_thinpool_name" {
- err := storageLVMSetThinPoolNameConfig(d, value.(string))
- if err != nil {
- return InternalError(err)
- }
- } else if key == "storage.zfs_pool_name" {
- err := storageZFSSetPoolNameConfig(d, value.(string))
- if err != nil {
- return InternalError(err)
- }
- if err = d.SetupStorageDriver(); err != nil {
- return InternalError(err)
- }
- } else if key == "core.https_address" {
- old_address, err := d.ConfigValueGet("core.https_address")
- if err != nil {
- return InternalError(err)
- }
-
- err = d.UpdateHTTPsPort(old_address, value.(string))
- if err != nil {
- return InternalError(err)
- }
+ value := valueRaw.(string)
- err = d.ConfigValueSet(key, value.(string))
- if err != nil {
- return InternalError(err)
- }
- } else if key == "core.proxy_https" || key == "core.proxy_http" || key == "core.proxy_ignore_hosts" {
- err = d.ConfigValueSet(key, value.(string))
- if err != nil {
- return InternalError(err)
- }
-
- // Update the cached proxy function
- d.updateProxy()
-
- // Clear the simplestreams cache as it's tied to the old proxy config
- imageStreamCacheLock.Lock()
- for k, _ := range imageStreamCache {
- delete(imageStreamCache, k)
- }
- imageStreamCacheLock.Unlock()
+ confKey, ok := daemonConfig[key]
+ if !ok {
+ return BadRequest(fmt.Errorf("Bad server config key: '%s'", key))
+ }
- } else {
- err := d.ConfigValueSet(key, value.(string))
- if err != nil {
- return InternalError(err)
- }
- if key == "images.remote_cache_expiry" {
- d.pruneChan <- true
- }
+ err := confKey.Set(d, value)
+ if err != nil {
+ return BadRequest(err)
}
}
diff --git a/lxd/certificates.go b/lxd/certificates.go
index e432f4a..df3b70b 100644
--- a/lxd/certificates.go
+++ b/lxd/certificates.go
@@ -140,7 +140,7 @@ func certificatesPost(d *Daemon, r *http.Request) Response {
}
}
- if !d.isTrustedClient(r) && !d.PasswordCheck(req.Password) {
+ if !d.isTrustedClient(r) && d.PasswordCheck(req.Password) != nil {
return Forbidden
}
diff --git a/lxd/containers_post.go b/lxd/containers_post.go
index 8668dd1..25e37ca 100644
--- a/lxd/containers_post.go
+++ b/lxd/containers_post.go
@@ -122,8 +122,9 @@ func createFromImage(d *Daemon, req *containerPostReq) Response {
run := func(op *operation) error {
if req.Source.Server != "" {
- updateCached, _ := d.ConfigValueGet("images.auto_update_cached")
- hash, err = d.ImageDownload(op, req.Source.Server, req.Source.Protocol, req.Source.Certificate, req.Source.Secret, hash, true, updateCached != "false")
+ hash, err = d.ImageDownload(
+ op, req.Source.Server, req.Source.Protocol, req.Source.Certificate, req.Source.Secret,
+ hash, true, daemonConfig["images.auto_update_cached"].GetBool())
if err != nil {
return err
}
diff --git a/lxd/daemon.go b/lxd/daemon.go
index 5a1883e..4e3f635 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -2,7 +2,6 @@ package main
import (
"bytes"
- "crypto/rand"
"crypto/tls"
"crypto/x509"
"database/sql"
@@ -55,11 +54,6 @@ var cgSwapAccounting = false
// UserNS
var runningInUserns = false
-const (
- pwSaltBytes = 32
- pwHashBytes = 64
-)
-
type Socket struct {
Socket net.Listener
CloseOnExit bool
@@ -89,8 +83,6 @@ type Daemon struct {
devlxd *net.UnixListener
- configValues map[string]string
-
MockMode bool
SetupMode bool
@@ -113,14 +105,6 @@ type Command struct {
delete func(d *Daemon, r *http.Request) Response
}
-func (d *Daemon) updateProxy() {
- d.proxy = shared.ProxyFromConfig(
- d.configValues["core.proxy_https"],
- d.configValues["core.proxy_http"],
- d.configValues["core.proxy_ignore_hosts"],
- )
-}
-
func (d *Daemon) httpGetSync(url string, certificate string) (*lxd.Response, error) {
var err error
@@ -360,32 +344,27 @@ func (d *Daemon) createCmd(version string, c Command) {
})
}
-func (d *Daemon) SetupStorageDriver() error {
- lvmVgName, err := d.ConfigValueGet("storage.lvm_vg_name")
- if err != nil {
- return fmt.Errorf("Couldn't read config: %s", err)
- }
+func (d *Daemon) SetupStorageDriver(driver string) error {
+ var err error
- zfsPoolName, err := d.ConfigValueGet("storage.zfs_pool_name")
- if err != nil {
- return fmt.Errorf("Couldn't read config: %s", err)
- }
+ lvmVgName := daemonConfig["storage.lvm_vg_name"].Get()
+ zfsPoolName := daemonConfig["storage.zfs_pool_name"].Get()
- if lvmVgName != "" {
+ if driver == "lvm" || lvmVgName != "" {
d.Storage, err = newStorage(d, storageTypeLvm)
if err != nil {
shared.Logf("Could not initialize storage type LVM: %s - falling back to dir", err)
} else {
return nil
}
- } else if zfsPoolName != "" {
+ } else if driver == "zfs" || zfsPoolName != "" {
d.Storage, err = newStorage(d, storageTypeZfs)
if err != nil {
shared.Logf("Could not initialize storage type ZFS: %s - falling back to dir", err)
} else {
return nil
}
- } else if d.BackingFs == "btrfs" {
+ } else if driver == "btrfs" || d.BackingFs == "btrfs" {
d.Storage, err = newStorage(d, storageTypeBtrfs)
if err != nil {
shared.Logf("Could not initialize storage type btrfs: %s - falling back to dir", err)
@@ -445,9 +424,9 @@ func setupSharedMounts() error {
func (d *Daemon) ListenAddresses() ([]string, error) {
addresses := make([]string, 0)
- value, err := d.ConfigValueGet("core.https_address")
- if err != nil || value == "" {
- return addresses, err
+ value := daemonConfig["core.https_address"].Get()
+ if value == "" {
+ return addresses, nil
}
localHost, localPort, err := net.SplitHostPort(value)
@@ -502,7 +481,9 @@ func (d *Daemon) ListenAddresses() ([]string, error) {
return addresses, nil
}
-func (d *Daemon) UpdateHTTPsPort(oldAddress string, newAddress string) error {
+func (d *Daemon) UpdateHTTPsPort(newAddress string) error {
+ oldAddress := daemonConfig["core.https_address"].Get()
+
if oldAddress == newAddress {
return nil
}
@@ -754,22 +735,26 @@ func (d *Daemon) Init() error {
return err
}
+ /* Load all config values from the database */
+ err = daemonConfigInit(d.db)
+ if err != nil {
+ return err
+ }
+
/* Setup the storage driver */
if !d.MockMode {
- err = d.SetupStorageDriver()
+ err = d.SetupStorageDriver("")
if err != nil {
return fmt.Errorf("Failed to setup storage: %s", err)
}
}
- /* Load all config values from the database */
- _, err = d.ConfigValuesGet()
- if err != nil {
- return err
- }
-
/* set the initial proxy function based on config values in the DB */
- d.updateProxy()
+ d.proxy = shared.ProxyFromConfig(
+ daemonConfig["core.proxy_https"].Get(),
+ daemonConfig["core.proxy_http"].Get(),
+ daemonConfig["core.proxy_ignore_hosts"].Get(),
+ )
/* Setup /dev/lxd */
d.devlxd, err = createAndBindDevLxd()
@@ -902,11 +887,7 @@ func (d *Daemon) Init() error {
d.UnixSocket = &Socket{Socket: unixl, CloseOnExit: true}
}
- listenAddr, err := d.ConfigValueGet("core.https_address")
- if err != nil {
- return err
- }
-
+ listenAddr := daemonConfig["core.https_address"].Get()
if listenAddr != "" {
_, _, err := net.SplitHostPort(listenAddr)
if err != nil {
@@ -980,18 +961,9 @@ func (d *Daemon) Ready() error {
autoUpdateImages(d)
for {
- interval, _ := d.ConfigValueGet("images.auto_update_interval")
- if interval == "" {
- interval = "6"
- }
-
- intervalInt, err := strconv.Atoi(interval)
- if err != nil {
- intervalInt = 0
- }
-
- if intervalInt > 0 {
- timer := time.NewTimer(time.Duration(intervalInt) * time.Hour)
+ interval := daemonConfig["images.auto_update_interval"].GetInt64()
+ if interval > 0 {
+ timer := time.NewTimer(time.Duration(interval) * time.Hour)
timeChan := timer.C
select {
@@ -1101,162 +1073,31 @@ func (d *Daemon) Stop() error {
return err
}
-// ConfigKeyIsValid returns if the given key is a known config value.
-func (d *Daemon) ConfigKeyIsValid(key string) bool {
- switch key {
- case "core.https_address":
- return true
- case "core.https_allowed_origin":
- return true
- case "core.https_allowed_methods":
- return true
- case "core.https_allowed_headers":
- return true
- case "core.proxy_https":
- return true
- case "core.proxy_http":
- return true
- case "core.proxy_ignore_hosts":
- return true
- case "core.trust_password":
- return true
- case "storage.lvm_vg_name":
- return true
- case "storage.lvm_thinpool_name":
- return true
- case "storage.lvm_fstype":
- return true
- case "storage.lvm_volume_size":
- return true
- case "storage.zfs_remove_snapshots":
- return true
- case "storage.zfs_pool_name":
- return true
- case "images.remote_cache_expiry":
- return true
- case "images.compression_algorithm":
- return true
- case "images.auto_update_interval":
- return true
- case "images.auto_update_cached":
- return true
- }
-
- return false
-}
-
-// ConfigValueGet returns a config value from the memory,
-// calls ConfigValuesGet if required.
-// It returns a empty result if the config key isn't given.
-func (d *Daemon) ConfigValueGet(key string) (string, error) {
- if d.configValues == nil {
- if _, err := d.ConfigValuesGet(); err != nil {
- return "", err
- }
- }
-
- if val, ok := d.configValues[key]; ok {
- return val, nil
- }
-
- return "", nil
-}
-
-// ConfigValuesGet fetches all config values and stores them in memory.
-func (d *Daemon) ConfigValuesGet() (map[string]string, error) {
- if d.configValues == nil {
- var err error
- d.configValues, err = dbConfigValuesGet(d.db)
- if err != nil {
- return d.configValues, err
- }
- }
-
- return d.configValues, nil
-}
-
-// ConfigValueSet sets a new or updates a config value,
-// it updates the value in the DB and in memory.
-func (d *Daemon) ConfigValueSet(key string, value string) error {
- if err := dbConfigValueSet(d.db, key, value); err != nil {
- return err
- }
-
- if d.configValues == nil {
- if _, err := d.ConfigValuesGet(); err != nil {
- return err
- }
- }
+func (d *Daemon) PasswordCheck(password string) error {
+ value := daemonConfig["core.trust_password"].Get()
+ // No password set
if value == "" {
- delete(d.configValues, key)
- } else {
- d.configValues[key] = value
- }
-
- return nil
-}
-
-// PasswordSet sets the password to the new value.
-func (d *Daemon) PasswordSet(password string) error {
- shared.Log.Info("Setting new https password")
- var value = password
- if password != "" {
- buf := make([]byte, pwSaltBytes)
- _, err := io.ReadFull(rand.Reader, buf)
- if err != nil {
- return err
- }
-
- hash, err := scrypt.Key([]byte(password), buf, 1<<14, 8, 1, pwHashBytes)
- if err != nil {
- return err
- }
-
- buf = append(buf, hash...)
- value = hex.EncodeToString(buf)
+ return fmt.Errorf("No password is set")
}
- err := d.ConfigValueSet("core.trust_password", value)
+ // Compare the password
+ buff, err := hex.DecodeString(value)
if err != nil {
return err
}
- return nil
-}
-
-// PasswordCheck checks if the given password is the same
-// as we have in the DB.
-func (d *Daemon) PasswordCheck(password string) bool {
- value, err := d.ConfigValueGet("core.trust_password")
+ salt := buff[0:32]
+ hash, err := scrypt.Key([]byte(password), salt, 1<<14, 8, 1, 64)
if err != nil {
- shared.Log.Error("verifyAdminPwd", log.Ctx{"err": err})
- return false
- }
-
- // No password set
- if value == "" {
- return false
+ return err
}
- buff, err := hex.DecodeString(value)
- if err != nil {
- shared.Log.Error("hex decode failed", log.Ctx{"err": err})
- return false
+ if !bytes.Equal(hash, buff[32:]) {
+ return fmt.Errorf("Bad password provided")
}
- salt := buff[0:pwSaltBytes]
- hash, err := scrypt.Key([]byte(password), salt, 1<<14, 8, 1, pwHashBytes)
- if err != nil {
- shared.Log.Error("Failed to create hash to check", log.Ctx{"err": err})
- return false
- }
- if !bytes.Equal(hash, buff[pwSaltBytes:]) {
- shared.Log.Error("Bad password received", log.Ctx{"err": err})
- return false
- }
- shared.Log.Debug("Verified the admin password")
- return true
+ return nil
}
type lxdHttpServer struct {
@@ -1265,18 +1106,18 @@ type lxdHttpServer struct {
}
func (s *lxdHttpServer) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
- allowedOrigin, _ := s.d.ConfigValueGet("core.https_allowed_origin")
+ allowedOrigin := daemonConfig["core.https_allowed_origin"].Get()
origin := req.Header.Get("Origin")
if allowedOrigin != "" && origin != "" {
rw.Header().Set("Access-Control-Allow-Origin", allowedOrigin)
}
- allowedMethods, _ := s.d.ConfigValueGet("core.https_allowed_methods")
+ allowedMethods := daemonConfig["core.https_allowed_methods"].Get()
if allowedMethods != "" && origin != "" {
rw.Header().Set("Access-Control-Allow-Methods", allowedMethods)
}
- allowedHeaders, _ := s.d.ConfigValueGet("core.https_allowed_headers")
+ allowedHeaders := daemonConfig["core.https_allowed_headers"].Get()
if allowedHeaders != "" && origin != "" {
rw.Header().Set("Access-Control-Allow-Headers", allowedHeaders)
}
diff --git a/lxd/daemon_config.go b/lxd/daemon_config.go
new file mode 100644
index 0000000..5e83233
--- /dev/null
+++ b/lxd/daemon_config.go
@@ -0,0 +1,319 @@
+package main
+
+import (
+ "crypto/rand"
+ "database/sql"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+ "sync"
+
+ "golang.org/x/crypto/scrypt"
+ log "gopkg.in/inconshreveable/log15.v2"
+
+ "github.com/lxc/lxd/shared"
+)
+
+var daemonConfigLock sync.Mutex
+var daemonConfig map[string]*daemonConfigKey
+
+type daemonConfigKey struct {
+ valueType string
+ defaultValue string
+ validValues []string
+ currentValue string
+ hiddenValue bool
+
+ validator func(d *Daemon, key string, value string) error
+ setter func(d *Daemon, key string, value string) (string, error)
+ trigger func(d *Daemon, key string, value string)
+}
+
+func (k *daemonConfigKey) name() string {
+ name := ""
+
+ // Look for a matching entry in daemonConfig
+ daemonConfigLock.Lock()
+ for key, value := range daemonConfig {
+ if value == k {
+ name = key
+ break
+ }
+ }
+ daemonConfigLock.Unlock()
+
+ return name
+}
+
+func (k *daemonConfigKey) Validate(d *Daemon, value string) error {
+ // No need to validate when unsetting
+ if value == "" {
+ return nil
+ }
+
+ // Validate booleans
+ if k.valueType == "bool" && !shared.StringInSlice(strings.ToLower(value), []string{"true", "false", "1", "0", "yes", "no"}) {
+ return fmt.Errorf("Invalid value for a boolean: %s", value)
+ }
+
+ // Validate integers
+ if k.valueType == "int" {
+ _, err := strconv.ParseInt(value, 10, 64)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Check against valid values
+ if k.validValues != nil && !shared.StringInSlice(value, k.validValues) {
+ return fmt.Errorf("Invalid value, only the following values are allowed: %s", k.validValues)
+ }
+
+ // Run external validation function
+ if k.validator != nil {
+ err := k.validator(d, k.name(), value)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (k *daemonConfigKey) Set(d *Daemon, value string) error {
+ var name string
+
+ // Check if we are actually changing things
+ oldValue := k.currentValue
+ if oldValue == value {
+ return nil
+ }
+
+ // Validate the new value
+ err := k.Validate(d, value)
+ if err != nil {
+ return err
+ }
+
+ // Run external setting function
+ if k.setter != nil {
+ value, err = k.setter(d, k.name(), value)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Get the configuration key and make sure daemonConfig is sane
+ name = k.name()
+ if name == "" {
+ return fmt.Errorf("Corrupted configuration cache")
+ }
+
+ // Actually apply the change
+ daemonConfigLock.Lock()
+ k.currentValue = value
+ daemonConfigLock.Unlock()
+
+ err = dbConfigValueSet(d.db, name, value)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (k *daemonConfigKey) Get() string {
+ value := k.currentValue
+
+ // Get the default value if not set
+ if value == "" {
+ value = k.defaultValue
+ }
+
+ return value
+}
+
+func (k *daemonConfigKey) GetBool() bool {
+ value := k.currentValue
+
+ // Get the default value if not set
+ if value == "" {
+ value = k.defaultValue
+ }
+
+ // Convert to boolean
+ if shared.StringInSlice(strings.ToLower(value), []string{"true", "1", "yes"}) {
+ return true
+ }
+
+ return false
+}
+
+func (k *daemonConfigKey) GetInt64() int64 {
+ value := k.currentValue
+
+ // Get the default value if not set
+ if value == "" {
+ value = k.defaultValue
+ }
+
+ // Convert to int64
+ ret, _ := strconv.ParseInt(value, 10, 64)
+ return ret
+}
+
+func daemonConfigInit(db *sql.DB) error {
+ // Set all the keys
+ daemonConfig = map[string]*daemonConfigKey{
+ "core.https_address": &daemonConfigKey{valueType: "string", setter: daemonConfigSetAddress},
+ "core.https_allowed_headers": &daemonConfigKey{valueType: "string"},
+ "core.https_allowed_methods": &daemonConfigKey{valueType: "string"},
+ "core.https_allowed_origin": &daemonConfigKey{valueType: "string"},
+ "core.proxy_http": &daemonConfigKey{valueType: "string", setter: daemonConfigSetProxy},
+ "core.proxy_https": &daemonConfigKey{valueType: "string", setter: daemonConfigSetProxy},
+ "core.proxy_ignore_hosts": &daemonConfigKey{valueType: "string", setter: daemonConfigSetProxy},
+ "core.trust_password": &daemonConfigKey{valueType: "string", hiddenValue: true, setter: daemonConfigSetPassword},
+
+ "images.auto_update_cached": &daemonConfigKey{valueType: "bool", defaultValue: "true"},
+ "images.auto_update_interval": &daemonConfigKey{valueType: "int", defaultValue: "6"},
+ "images.compression_algorithm": &daemonConfigKey{valueType: "string", defaultValue: "gzip"},
+ "images.remote_cache_expiry": &daemonConfigKey{valueType: "int", defaultValue: "10", trigger: daemonConfigTriggerExpiry},
+
+ "storage.lvm_fstype": &daemonConfigKey{valueType: "string", defaultValue: "ext4", validValues: []string{"ext4", "xfs"}},
+ "storage.lvm_thinpool_name": &daemonConfigKey{valueType: "string", defaultValue: "LXDPool", validator: storageLVMValidateThinPoolName},
+ "storage.lvm_vg_name": &daemonConfigKey{valueType: "string", validator: storageLVMValidateVolumeGroupName, setter: daemonConfigSetStorage},
+ "storage.lvm_volume_size": &daemonConfigKey{valueType: "string", defaultValue: "10GiB"},
+ "storage.zfs_pool_name": &daemonConfigKey{valueType: "string", validator: storageZFSValidatePoolName, setter: daemonConfigSetStorage},
+ "storage.zfs_remove_snapshots": &daemonConfigKey{valueType: "bool"},
+ }
+
+ // Load the values from the DB
+ dbValues, err := dbConfigValuesGet(db)
+ if err != nil {
+ return err
+ }
+
+ daemonConfigLock.Lock()
+ for k, v := range dbValues {
+ _, ok := daemonConfig[k]
+ if !ok {
+ shared.Log.Error("Found invalid configuration key in database", log.Ctx{"key": k})
+ }
+
+ daemonConfig[k].currentValue = v
+ }
+ daemonConfigLock.Unlock()
+
+ return nil
+}
+
+func daemonConfigRender() map[string]interface{} {
+ config := map[string]interface{}{}
+
+ // Turn the config into a JSON-compatible map
+ for k, v := range daemonConfig {
+ value := v.Get()
+ if value != v.defaultValue {
+ if v.hiddenValue {
+ config[k] = true
+ } else {
+ config[k] = value
+ }
+ }
+ }
+
+ return config
+}
+
+func daemonConfigSetPassword(d *Daemon, key string, value string) (string, error) {
+ // Nothing to do on unset
+ if value == "" {
+ return value, nil
+ }
+
+ // Hash the password
+ buf := make([]byte, 32)
+ _, err := io.ReadFull(rand.Reader, buf)
+ if err != nil {
+ return "", err
+ }
+
+ hash, err := scrypt.Key([]byte(value), buf, 1<<14, 8, 1, 64)
+ if err != nil {
+ return "", err
+ }
+
+ buf = append(buf, hash...)
+ value = hex.EncodeToString(buf)
+
+ return value, nil
+}
+
+func daemonConfigSetStorage(d *Daemon, key string, value string) (string, error) {
+ driver := ""
+
+ // Guess the driver name from the key
+ switch key {
+ case "storage.lvm_vg_name":
+ driver = "lvm"
+ case "storage.zfs_pool_name":
+ driver = "zfs"
+ }
+
+ // Should never actually hit this
+ if driver == "" {
+ return "", fmt.Errorf("Invalid storage key: %s", key)
+ }
+
+ // Update the current storage driver
+ err := d.SetupStorageDriver(driver)
+ if err != nil {
+ return "", err
+ }
+
+ return value, nil
+}
+
+func daemonConfigSetAddress(d *Daemon, key string, value string) (string, error) {
+ // Update the current https address
+ err := d.UpdateHTTPsPort(value)
+ if err != nil {
+ return "", err
+ }
+
+ return value, nil
+}
+
+func daemonConfigSetProxy(d *Daemon, key string, value string) (string, error) {
+ // Get the current config
+ config := map[string]string{}
+ config["core.proxy_https"] = daemonConfig["core.proxy_https"].Get()
+ config["core.proxy_http"] = daemonConfig["core.proxy_http"].Get()
+ config["core.proxy_ignore_hosts"] = daemonConfig["core.proxy_ignore_hosts"].Get()
+
+ // Apply the change
+ config[key] = value
+
+ // Update the cached proxy function
+ d.proxy = shared.ProxyFromConfig(
+ config["core.proxy_https"],
+ config["core.proxy_http"],
+ config["core.proxy_ignore_hosts"],
+ )
+
+ // Clear the simplestreams cache as it's tied to the old proxy config
+ imageStreamCacheLock.Lock()
+ for k, _ := range imageStreamCache {
+ delete(imageStreamCache, k)
+ }
+ imageStreamCacheLock.Unlock()
+
+ return value, nil
+}
+
+func daemonConfigTriggerExpiry(d *Daemon, key string, value string) {
+ // Trigger an image pruning run
+ d.pruneChan <- true
+}
diff --git a/lxd/db_images.go b/lxd/db_images.go
index efaefd4..76ce7bb 100644
--- a/lxd/db_images.go
+++ b/lxd/db_images.go
@@ -38,7 +38,7 @@ func dbImagesGet(db *sql.DB, public bool) ([]string, error) {
return results, nil
}
-func dbImagesGetExpired(db *sql.DB, expiry int) ([]string, error) {
+func dbImagesGetExpired(db *sql.DB, expiry int64) ([]string, error) {
q := `SELECT fingerprint FROM images WHERE cached=1 AND creation_date<=strftime('%s', date('now', '-` + fmt.Sprintf("%d", expiry) + ` day'))`
var fp string
diff --git a/lxd/db_update.go b/lxd/db_update.go
index d5c6956..0534b50 100644
--- a/lxd/db_update.go
+++ b/lxd/db_update.go
@@ -236,10 +236,7 @@ func dbUpdateFromV15(d *Daemon) error {
return err
}
- vgName, err := d.ConfigValueGet("storage.lvm_vg_name")
- if err != nil {
- return fmt.Errorf("Error checking server config: %v", err)
- }
+ vgName := daemonConfig["storage.lvm_vg_name"].Get()
for _, cName := range cNames {
var lvLinkPath string
diff --git a/lxd/images.go b/lxd/images.go
index 28c8a90..1a78796 100644
--- a/lxd/images.go
+++ b/lxd/images.go
@@ -230,17 +230,8 @@ func imgPostContInfo(d *Daemon, r *http.Request, req imagePostReq,
}
tarfile.Close()
- compress, err := d.ConfigValueGet("images.compression_algorithm")
- if err != nil {
- return info, err
- }
-
- // Default to gzip for this
- if compress == "" {
- compress = "gzip"
- }
-
var compressedPath string
+ compress := daemonConfig["images.compression_algorithm"].Get()
if compress != "none" {
compressedPath, err = compressFile(tarfile.Name(), compress)
if err != nil {
@@ -889,33 +880,22 @@ func autoUpdateImages(d *Daemon) {
func pruneExpiredImages(d *Daemon) {
shared.Debugf("Pruning expired images")
- expiry, err := d.ConfigValueGet("images.remote_cache_expiry")
- if err != nil {
- shared.Log.Error("Unable to read the images.remote_cache_expiry key")
- return
- }
-
- if expiry == "" {
- expiry = "10"
- }
-
- expiryInt, err := strconv.Atoi(expiry)
- if err != nil {
- shared.Log.Error("Invalid value for images.remote_cache_expiry", log.Ctx{"err": err})
- return
- }
- images, err := dbImagesGetExpired(d.db, expiryInt)
+ // Get the list of expired images
+ expiry := daemonConfig["images.remote_cache_expiry"].GetInt64()
+ images, err := dbImagesGetExpired(d.db, expiry)
if err != nil {
shared.Log.Error("Unable to retrieve the list of expired images", log.Ctx{"err": err})
return
}
+ // Delete them
for _, fp := range images {
if err := doDeleteImage(d, fp); err != nil {
shared.Log.Error("Error deleting image", log.Ctx{"err": err, "fp": fp})
}
}
+
shared.Debugf("Done pruning expired images")
}
diff --git a/lxd/main.go b/lxd/main.go
index 2065163..14cafbb 100644
--- a/lxd/main.go
+++ b/lxd/main.go
@@ -474,11 +474,7 @@ func cmdActivateIfNeeded() error {
}
// Look for network socket
- value, err := d.ConfigValueGet("core.https_address")
- if err != nil {
- return err
- }
-
+ value := daemonConfig["core.https_address"].Get()
if value != "" {
shared.Debugf("Daemon has core.https_address set, activating...")
_, err := lxd.NewClient(&lxd.DefaultConfig, "local")
diff --git a/lxd/storage_lvm.go b/lxd/storage_lvm.go
index 9f571aa..5fb501d 100644
--- a/lxd/storage_lvm.go
+++ b/lxd/storage_lvm.go
@@ -17,9 +17,6 @@ import (
log "gopkg.in/inconshreveable/log15.v2"
)
-var storageLvmDefaultThinLVSize = "10GiB"
-var storageLvmDefaultThinPoolName = "LXDPool"
-
func storageLVMCheckVolumeGroup(vgName string) error {
output, err := exec.Command("vgdisplay", "-s", vgName).CombinedOutput()
if err != nil {
@@ -53,18 +50,8 @@ func storageLVMThinpoolExists(vgName string, poolName string) (bool, error) {
func storageLVMGetThinPoolUsers(d *Daemon) ([]string, error) {
results := []string{}
- vgname, err := d.ConfigValueGet("storage.lvm_vg_name")
- if err != nil {
- return results, fmt.Errorf("Error getting lvm_vg_name config")
- }
- if vgname == "" {
- return results, nil
- }
- poolname, err := d.ConfigValueGet("storage.lvm_thinpool_name")
- if err != nil {
- return results, fmt.Errorf("Error getting lvm_thinpool_name config")
- }
- if poolname == "" {
+
+ if daemonConfig["storage.lvm_vg_name"].Get() == "" {
return results, nil
}
@@ -72,6 +59,7 @@ func storageLVMGetThinPoolUsers(d *Daemon) ([]string, error) {
if err != nil {
return results, err
}
+
for _, cName := range cNames {
var lvLinkPath string
if strings.Contains(cName, shared.SnapshotDelimiter) {
@@ -100,63 +88,52 @@ func storageLVMGetThinPoolUsers(d *Daemon) ([]string, error) {
return results, nil
}
-func storageLVMSetThinPoolNameConfig(d *Daemon, poolname string) error {
+func storageLVMValidateThinPoolName(d *Daemon, key string, value string) error {
users, err := storageLVMGetThinPoolUsers(d)
if err != nil {
return fmt.Errorf("Error checking if a pool is already in use: %v", err)
}
+
if len(users) > 0 {
return fmt.Errorf("Can not change LVM config. Images or containers are still using LVs: %v", users)
}
- vgname, err := d.ConfigValueGet("storage.lvm_vg_name")
- if err != nil {
- return fmt.Errorf("Error getting lvm_vg_name config: %v", err)
- }
-
- if poolname != "" {
+ vgname := daemonConfig["storage.lvm_vg_name"].Get()
+ if value != "" {
if vgname == "" {
return fmt.Errorf("Can not set lvm_thinpool_name without lvm_vg_name set.")
}
- poolExists, err := storageLVMThinpoolExists(vgname, poolname)
+ poolExists, err := storageLVMThinpoolExists(vgname, value)
if err != nil {
- return fmt.Errorf("Error checking for thin pool '%s' in '%s': %v", poolname, vgname, err)
+ return fmt.Errorf("Error checking for thin pool '%s' in '%s': %v", value, vgname, err)
}
+
if !poolExists {
- return fmt.Errorf("Pool '%s' does not exist in Volume Group '%s'", poolname, vgname)
+ return fmt.Errorf("Pool '%s' does not exist in Volume Group '%s'", value, vgname)
}
}
- err = d.ConfigValueSet("storage.lvm_thinpool_name", poolname)
- if err != nil {
- return err
- }
-
return nil
}
-func storageLVMSetVolumeGroupNameConfig(d *Daemon, vgname string) error {
+func storageLVMValidateVolumeGroupName(d *Daemon, key string, value string) error {
users, err := storageLVMGetThinPoolUsers(d)
if err != nil {
return fmt.Errorf("Error checking if a pool is already in use: %v", err)
}
+
if len(users) > 0 {
return fmt.Errorf("Can not change LVM config. Images or containers are still using LVs: %v", users)
}
- if vgname != "" {
- err = storageLVMCheckVolumeGroup(vgname)
+ if value != "" {
+ err = storageLVMCheckVolumeGroup(value)
if err != nil {
return err
}
}
- err = d.ConfigValueSet("storage.lvm_vg_name", vgname)
- if err != nil {
- return err
- }
-
return nil
}
@@ -210,10 +187,7 @@ func (s *storageLvm) Init(config map[string]interface{}) (storage, error) {
}
if config["vgName"] == nil {
- vgName, err := s.d.ConfigValueGet("storage.lvm_vg_name")
- if err != nil {
- return s, fmt.Errorf("Error checking server config: %v", err)
- }
+ vgName := daemonConfig["storage.lvm_vg_name"].Get()
if vgName == "" {
return s, fmt.Errorf("LVM isn't enabled")
}
@@ -345,17 +319,8 @@ func (s *storageLvm) ContainerCreateFromImage(
return err
}
- var fstype string
- fstype, err = s.d.ConfigValueGet("storage.lvm_fstype")
- if err != nil {
- return fmt.Errorf("Error checking server config, err=%v", err)
- }
-
- if fstype == "" {
- fstype = "ext4"
- }
-
// Generate a new xfs's UUID
+ fstype := daemonConfig["storage.lvm_fstype"].Get()
if fstype == "xfs" {
err := xfsGenerateNewUUID(lvpath)
if err != nil {
@@ -458,16 +423,9 @@ func (s *storageLvm) ContainerCopy(container container, sourceContainer containe
func (s *storageLvm) ContainerStart(container container) error {
lvName := containerNameToLVName(container.Name())
lvpath := fmt.Sprintf("/dev/%s/%s", s.vgName, lvName)
- fstype, err := s.d.ConfigValueGet("storage.lvm_fstype")
- if err != nil {
- return fmt.Errorf("Error checking server config, err=%v", err)
- }
+ fstype := daemonConfig["storage.lvm_fstype"].Get()
- if fstype == "" {
- fstype = "ext4"
- }
-
- err = tryMount(lvpath, container.Path(), fstype, 0, "discard")
+ err := tryMount(lvpath, container.Path(), fstype, 0, "discard")
if err != nil {
return fmt.Errorf(
"Error mounting snapshot LV path='%s': %v",
@@ -702,17 +660,8 @@ func (s *storageLvm) ContainerSnapshotStart(container container) error {
}
}
- var fstype string
- fstype, err = s.d.ConfigValueGet("storage.lvm_fstype")
- if err != nil {
- return fmt.Errorf("Error checking server config, err=%v", err)
- }
-
- if fstype == "" {
- fstype = "ext4"
- }
-
// Generate a new xfs's UUID
+ fstype := daemonConfig["storage.lvm_fstype"].Get()
if fstype == "xfs" {
err := xfsGenerateNewUUID(lvpath)
if err != nil {
@@ -775,21 +724,11 @@ func (s *storageLvm) ImageCreate(fingerprint string) error {
}
}()
- var fstype string
- fstype, err = s.d.ConfigValueGet("storage.lvm_fstype")
- if err != nil {
- return fmt.Errorf("Error checking server config, err=%v", err)
- }
-
- if fstype == "" {
- fstype = "ext4"
- }
-
+ fstype := daemonConfig["storage.lvm_fstype"].Get()
err = tryMount(lvpath, tempLVMountPoint, fstype, 0, "discard")
if err != nil {
shared.Logf("Error mounting image LV for untarring: %v", err)
return fmt.Errorf("Error mounting image LV: %v", err)
-
}
untarErr := untarImage(finalName, tempLVMountPoint)
@@ -833,24 +772,26 @@ func (s *storageLvm) ImageDelete(fingerprint string) error {
}
func (s *storageLvm) createDefaultThinPool() (string, error) {
+ thinPoolName := daemonConfig["storage.lvm_thinpool_name"].Get()
+
// Create a tiny 1G thinpool
output, err := tryExec(
"lvcreate",
"--poolmetadatasize", "1G",
"-L", "1G",
"--thinpool",
- fmt.Sprintf("%s/%s", s.vgName, storageLvmDefaultThinPoolName))
+ fmt.Sprintf("%s/%s", s.vgName, thinPoolName))
if err != nil {
s.log.Error(
"Could not create thin pool",
log.Ctx{
- "name": storageLvmDefaultThinPoolName,
+ "name": thinPoolName,
"err": err,
"output": string(output)})
return "", fmt.Errorf(
- "Could not create LVM thin pool named %s", storageLvmDefaultThinPoolName)
+ "Could not create LVM thin pool named %s", thinPoolName)
}
// Grow it to the maximum VG size (two step process required by old LVM)
@@ -858,49 +799,41 @@ func (s *storageLvm) createDefaultThinPool() (string, error) {
"lvextend",
"--alloc", "anywhere",
"-l", "100%FREE",
- fmt.Sprintf("%s/%s", s.vgName, storageLvmDefaultThinPoolName))
+ fmt.Sprintf("%s/%s", s.vgName, thinPoolName))
if err != nil {
s.log.Error(
"Could not grow thin pool",
log.Ctx{
- "name": storageLvmDefaultThinPoolName,
+ "name": thinPoolName,
"err": err,
"output": string(output)})
return "", fmt.Errorf(
- "Could not grow LVM thin pool named %s", storageLvmDefaultThinPoolName)
+ "Could not grow LVM thin pool named %s", thinPoolName)
}
- return storageLvmDefaultThinPoolName, nil
+ return thinPoolName, nil
}
func (s *storageLvm) createThinLV(lvname string) (string, error) {
- poolname, err := s.d.ConfigValueGet("storage.lvm_thinpool_name")
- if err != nil {
- return "", fmt.Errorf("Error checking server config, err=%v", err)
- }
+ var err error
+ poolname := daemonConfig["storage.lvm_thinpool_name"].Get()
if poolname == "" {
poolname, err = s.createDefaultThinPool()
if err != nil {
return "", fmt.Errorf("Error creating LVM thin pool: %v", err)
}
- err = storageLVMSetThinPoolNameConfig(s.d, poolname)
+
+ err = storageLVMValidateThinPoolName(s.d, "", poolname)
if err != nil {
s.log.Error("Setting thin pool name", log.Ctx{"err": err})
return "", fmt.Errorf("Error setting LVM thin pool config: %v", err)
}
}
- lvSize, err := s.d.ConfigValueGet("storage.lvm_volume_size")
- if err != nil {
- return "", fmt.Errorf("Error checking server config, err=%v", err)
- }
-
- if lvSize == "" {
- lvSize = storageLvmDefaultThinLVSize
- }
+ lvSize := daemonConfig["storage.lvm_volume_size"].Get()
output, err := tryExec(
"lvcreate",
@@ -908,7 +841,6 @@ func (s *storageLvm) createThinLV(lvname string) (string, error) {
"-n", lvname,
"--virtualsize", lvSize,
fmt.Sprintf("%s/%s", s.vgName, poolname))
-
if err != nil {
s.log.Error("Could not create LV", log.Ctx{"lvname": lvname, "output": string(output)})
return "", fmt.Errorf("Could not create thin LV named %s", lvname)
@@ -916,8 +848,7 @@ func (s *storageLvm) createThinLV(lvname string) (string, error) {
lvpath := fmt.Sprintf("/dev/%s/%s", s.vgName, lvname)
- fstype, err := s.d.ConfigValueGet("storage.lvm_fstype")
-
+ fstype := daemonConfig["storage.lvm_fstype"].Get()
switch fstype {
case "xfs":
output, err = tryExec(
@@ -932,7 +863,7 @@ func (s *storageLvm) createThinLV(lvname string) (string, error) {
}
if err != nil {
- s.log.Error("mkfs.ext4", log.Ctx{"output": string(output)})
+ s.log.Error("Filesystem creation failed", log.Ctx{"output": string(output)})
return "", fmt.Errorf("Error making filesystem on image LV: %v", err)
}
diff --git a/lxd/storage_zfs.go b/lxd/storage_zfs.go
index 162ba18..7b35aa9 100644
--- a/lxd/storage_zfs.go
+++ b/lxd/storage_zfs.go
@@ -34,11 +34,7 @@ func (s *storageZfs) Init(config map[string]interface{}) (storage, error) {
}
if config["zfsPool"] == nil {
- zfsPool, err := s.d.ConfigValueGet("storage.zfs_pool_name")
- if err != nil {
- return s, fmt.Errorf("Error checking server config: %v", err)
- }
-
+ zfsPool := daemonConfig["storage.zfs_pool_name"].Get()
if zfsPool == "" {
return s, fmt.Errorf("ZFS isn't enabled")
}
@@ -173,12 +169,7 @@ func (s *storageZfs) ContainerCanRestore(container container, sourceContainer co
}
if snaps[len(snaps)-1].Name() != sourceContainer.Name() {
- v, err := s.d.ConfigValueGet("storage.zfs_remove_snapshots")
- if err != nil {
- return err
- }
-
- if v != "true" {
+ if !daemonConfig["storage.zfs_remove_snapshots"].GetBool() {
return fmt.Errorf("ZFS can only restore from the latest snapshot. Delete newer snapshots or copy the snapshot into a new container instead.")
}
@@ -1111,7 +1102,7 @@ func (s *storageZfs) zfsGetPoolUsers() ([]string, error) {
}
// Global functions
-func storageZFSSetPoolNameConfig(d *Daemon, poolname string) error {
+func storageZFSValidatePoolName(d *Daemon, key string, value string) error {
s := storageZfs{}
// Confirm the backend is working
@@ -1121,20 +1112,15 @@ func storageZFSSetPoolNameConfig(d *Daemon, poolname string) error {
}
// Confirm the new pool exists and is compatible
- if poolname != "" {
- err = s.zfsCheckPool(poolname)
+ if value != "" {
+ err = s.zfsCheckPool(value)
if err != nil {
return fmt.Errorf("Invalid ZFS pool: %v", err)
}
}
- // Check if we're switching pools
- oldPoolname, err := d.ConfigValueGet("storage.zfs_pool_name")
- if err != nil {
- return err
- }
-
// Confirm the old pool isn't in use anymore
+ oldPoolname := daemonConfig["storage.zfs_pool_name"].Get()
if oldPoolname != "" {
s.zfsPool = oldPoolname
@@ -1147,13 +1133,6 @@ func storageZFSSetPoolNameConfig(d *Daemon, poolname string) error {
return fmt.Errorf("Can not change ZFS config. Images or containers are still using the ZFS pool: %v", users)
}
}
- s.zfsPool = poolname
-
- // All good, set the new pool name
- err = d.ConfigValueSet("storage.zfs_pool_name", poolname)
- if err != nil {
- return err
- }
return nil
}