[lxc-devel] [lxd/master] lxd: Renames containerLoadNodeAll to instanceLoadNodeAll
tomponline on Github
lxc-bot at linuxcontainers.org
Tue Sep 17 07:26:13 UTC 2019
-------------- next part --------------
From 32905cc9c26be901e976c8f6aab6e4714f28d0f6 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Mon, 16 Sep 2019 17:55:10 +0100
Subject: [PATCH] lxd: Renames containerLoadNodeAll to instanceLoadNodeAll
- And changes return type to []Instance
Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
lxd/container.go | 108 ++++++++++++++++++-----------------
lxd/container_lxc.go | 4 +-
lxd/containers.go | 42 +++++++-------
lxd/containers_get.go | 6 +-
lxd/daemon.go | 6 +-
lxd/devices.go | 38 ++++++------
lxd/devlxd.go | 14 +++--
lxd/networks.go | 45 ++++++++-------
lxd/networks_utils.go | 25 ++++----
lxd/storage_volumes_utils.go | 45 +++++++--------
10 files changed, 173 insertions(+), 160 deletions(-)
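
The interesting part of this rename is the init() hook at the top of container.go: the loader is handed to the device package through a package-level function variable, because the concrete instance types live in the main package and cannot be imported from lxd/device. A rough, self-contained sketch of that pattern follows (all names besides InstanceIdentifier/InstanceLoadNodeAll are illustrative stand-ins, and everything is collapsed into one package so it runs on its own):

package main

import "fmt"

// InstanceIdentifier stands in for the minimal interface the device package
// knows about; it cannot see the concrete container/VM types in main.
type InstanceIdentifier interface {
	Name() string
	Project() string
}

// InstanceLoadNodeAll is the function variable the main package fills in at
// init time (in the real tree this lives in lxd/device).
var InstanceLoadNodeAll func() ([]InstanceIdentifier, error)

// instSketch is a made-up concrete type playing the role of containerLXC.
type instSketch struct{ name, project string }

func (i *instSketch) Name() string    { return i.name }
func (i *instSketch) Project() string { return i.project }

func init() {
	// Convert the concrete slice into the interface slice the consumer expects.
	InstanceLoadNodeAll = func() ([]InstanceIdentifier, error) {
		local := []*instSketch{{name: "c1", project: "default"}}
		out := make([]InstanceIdentifier, 0, len(local))
		for _, i := range local {
			out = append(out, i)
		}
		return out, nil
	}
}

func main() {
	insts, _ := InstanceLoadNodeAll()
	for _, i := range insts {
		fmt.Println(i.Project(), i.Name())
	}
}
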
diff --git a/lxd/container.go b/lxd/container.go
index de03c8e5c6..45c7687dba 100644
--- a/lxd/container.go
+++ b/lxd/container.go
@@ -35,10 +35,10 @@ import (
)
func init() {
- // Expose containerLoadNodeAll to the device package converting the response to a slice of InstanceIdentifiers.
+ // Expose instanceLoadNodeAll to the device package converting the response to a slice of InstanceIdentifiers.
// This is because container types are defined in the main package and are not importable.
device.InstanceLoadNodeAll = func(s *state.State) ([]device.InstanceIdentifier, error) {
- containers, err := containerLoadNodeAll(s)
+ containers, err := instanceLoadNodeAll(s)
if err != nil {
return nil, err
}
@@ -994,7 +994,7 @@ func instanceLoadByProjectAndName(s *state.State, project, name string) (Instanc
return c, nil
}
-func containerLoadByProject(s *state.State, project string) ([]container, error) {
+func instanceLoadByProject(s *state.State, project string) ([]Instance, error) {
// Get all the containers
var cts []db.Instance
err := s.Cluster.Transaction(func(tx *db.ClusterTx) error {
@@ -1014,11 +1014,11 @@ func containerLoadByProject(s *state.State, project string) ([]container, error)
return nil, err
}
- return containerLoadAllInternal(cts, s)
+ return instanceLoadAllInternal(cts, s)
}
-// Load all containers across all projects.
-func containerLoadFromAllProjects(s *state.State) ([]container, error) {
+// Load all instances across all projects.
+func instanceLoadFromAllProjects(s *state.State) ([]Instance, error) {
var projects []string
err := s.Cluster.Transaction(func(tx *db.ClusterTx) error {
@@ -1030,25 +1030,25 @@ func containerLoadFromAllProjects(s *state.State) ([]container, error) {
return nil, err
}
- containers := []container{}
+ instances := []Instance{}
for _, project := range projects {
- projectContainers, err := containerLoadByProject(s, project)
+ projectInstances, err := instanceLoadByProject(s, project)
if err != nil {
- return nil, errors.Wrapf(nil, "Load containers in project %s", project)
+ return nil, errors.Wrapf(err, "Load instances in project %s", project)
}
- containers = append(containers, projectContainers...)
+ instances = append(instances, projectInstances...)
}
- return containers, nil
+ return instances, nil
}
// Legacy interface.
-func containerLoadAll(s *state.State) ([]container, error) {
- return containerLoadByProject(s, "default")
+func instanceLoadAll(s *state.State) ([]Instance, error) {
+ return instanceLoadByProject(s, "default")
}
-// Load all containers of this nodes.
-func containerLoadNodeAll(s *state.State) ([]container, error) {
+// Load all instances on this node.
+func instanceLoadNodeAll(s *state.State) ([]Instance, error) {
// Get all the container arguments
var cts []db.Instance
err := s.Cluster.Transaction(func(tx *db.ClusterTx) error {
@@ -1064,11 +1064,11 @@ func containerLoadNodeAll(s *state.State) ([]container, error) {
return nil, err
}
- return containerLoadAllInternal(cts, s)
+ return instanceLoadAllInternal(cts, s)
}
-// Load all containers of this nodes under the given project.
-func containerLoadNodeProjectAll(s *state.State, project string, instanceType instance.Type) ([]container, error) {
+// Load all instances on this node under the given project.
+func instanceLoadNodeProjectAll(s *state.State, project string, instanceType instance.Type) ([]Instance, error) {
// Get all the container arguments
var cts []db.Instance
err := s.Cluster.Transaction(func(tx *db.ClusterTx) error {
@@ -1084,19 +1084,19 @@ func containerLoadNodeProjectAll(s *state.State, project string, instanceType in
return nil, err
}
- return containerLoadAllInternal(cts, s)
+ return instanceLoadAllInternal(cts, s)
}
-func containerLoadAllInternal(cts []db.Instance, s *state.State) ([]container, error) {
+func instanceLoadAllInternal(dbInstances []db.Instance, s *state.State) ([]Instance, error) {
// Figure out what profiles are in use
profiles := map[string]map[string]api.Profile{}
- for _, cArgs := range cts {
- projectProfiles, ok := profiles[cArgs.Project]
+ for _, instArgs := range dbInstances {
+ projectProfiles, ok := profiles[instArgs.Project]
if !ok {
projectProfiles = map[string]api.Profile{}
- profiles[cArgs.Project] = projectProfiles
+ profiles[instArgs.Project] = projectProfiles
}
- for _, profile := range cArgs.Profiles {
+ for _, profile := range instArgs.Profiles {
_, ok := projectProfiles[profile]
if !ok {
projectProfiles[profile] = api.Profile{}
@@ -1116,26 +1116,30 @@ func containerLoadAllInternal(cts []db.Instance, s *state.State) ([]container, e
}
}
- // Load the container structs
- containers := []container{}
- for _, container := range cts {
- // Figure out the container's profiles
+ // Load the instance structs
+ instances := []Instance{}
+ for _, dbInstance := range dbInstances {
+ // Figure out the instance's profiles
cProfiles := []api.Profile{}
- for _, name := range container.Profiles {
- cProfiles = append(cProfiles, profiles[container.Project][name])
+ for _, name := range dbInstance.Profiles {
+ cProfiles = append(cProfiles, profiles[dbInstance.Project][name])
}
- args := db.ContainerToArgs(&container)
-
- ct, err := containerLXCLoad(s, args, cProfiles)
- if err != nil {
- return nil, err
+ if dbInstance.Type == instance.TypeContainer {
+ args := db.ContainerToArgs(&dbInstance)
+ ct, err := containerLXCLoad(s, args, cProfiles)
+ if err != nil {
+ return nil, err
+ }
+ instances = append(instances, ct)
+ } else {
+ // TODO add virtual machine load here.
+ continue
}
- containers = append(containers, ct)
}
- return containers, nil
+ return instances, nil
}
func containerCompareSnapshots(source Instance, target Instance) ([]Instance, []Instance, error) {
@@ -1190,15 +1194,15 @@ func containerCompareSnapshots(source Instance, target Instance) ([]Instance, []
func autoCreateContainerSnapshotsTask(d *Daemon) (task.Func, task.Schedule) {
f := func(ctx context.Context) {
- // Load all local containers
- allContainers, err := containerLoadNodeAll(d.State())
+ // Load all local instances
+ allContainers, err := instanceLoadNodeAll(d.State())
if err != nil {
logger.Error("Failed to load containers for scheduled snapshots", log.Ctx{"err": err})
return
}
// Figure out which need snapshotting (if any)
- containers := []container{}
+ instances := []Instance{}
for _, c := range allContainers {
schedule := c.ExpandedConfig()["snapshots.schedule"]
@@ -1237,15 +1241,15 @@ func autoCreateContainerSnapshotsTask(d *Daemon) (task.Func, task.Schedule) {
continue
}
- containers = append(containers, c)
+ instances = append(instances, c)
}
- if len(containers) == 0 {
+ if len(instances) == 0 {
return
}
opRun := func(op *operation) error {
- return autoCreateContainerSnapshots(ctx, d, containers)
+ return autoCreateContainerSnapshots(ctx, d, instances)
}
op, err := operationCreate(d.cluster, "", operationClassTask, db.OperationSnapshotCreate, nil, nil, opRun, nil, nil)
@@ -1279,9 +1283,9 @@ func autoCreateContainerSnapshotsTask(d *Daemon) (task.Func, task.Schedule) {
return f, schedule
}
-func autoCreateContainerSnapshots(ctx context.Context, d *Daemon, containers []container) error {
+func autoCreateContainerSnapshots(ctx context.Context, d *Daemon, instances []Instance) error {
// Make the snapshots
- for _, c := range containers {
+ for _, c := range instances {
ch := make(chan error)
go func() {
snapshotName, err := containerDetermineNextSnapshotName(d, c, "snap%d")
@@ -1333,16 +1337,16 @@ func autoCreateContainerSnapshots(ctx context.Context, d *Daemon, containers []c
func pruneExpiredContainerSnapshotsTask(d *Daemon) (task.Func, task.Schedule) {
f := func(ctx context.Context) {
- // Load all local containers
- allContainers, err := containerLoadNodeAll(d.State())
+ // Load all local instances
+ allInstances, err := instanceLoadNodeAll(d.State())
if err != nil {
- logger.Error("Failed to load containers for snapshot expiry", log.Ctx{"err": err})
+ logger.Error("Failed to load instances for snapshot expiry", log.Ctx{"err": err})
return
}
// Figure out which need snapshotting (if any)
expiredSnapshots := []Instance{}
- for _, c := range allContainers {
+ for _, c := range allInstances {
snapshots, err := c.Snapshots()
if err != nil {
logger.Error("Failed to list snapshots", log.Ctx{"err": err, "container": c.Name(), "project": c.Project()})
@@ -1375,14 +1379,14 @@ func pruneExpiredContainerSnapshotsTask(d *Daemon) (task.Func, task.Schedule) {
return
}
- logger.Info("Pruning expired container snapshots")
+ logger.Info("Pruning expired instance snapshots")
_, err = op.Run()
if err != nil {
- logger.Error("Failed to remove expired container snapshots", log.Ctx{"err": err})
+ logger.Error("Failed to remove expired instance snapshots", log.Ctx{"err": err})
}
- logger.Info("Done pruning expired container snapshots")
+ logger.Info("Done pruning expired instance snapshots")
}
first := true
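
instanceLoadAllInternal now dispatches on the database record's type: container records go through containerLXCLoad, while virtual machines are skipped until a VM loader exists. A minimal sketch of that dispatch, with made-up types standing in for db.Instance and the real loaders:

package main

import "fmt"

type InstanceType int

const (
	TypeContainer InstanceType = iota
	TypeVM
)

// dbInstance plays the role of db.Instance.
type dbInstance struct {
	Name string
	Type InstanceType
}

type Instance interface{ Name() string }

// container plays the role of the concrete containerLXC type.
type container struct{ name string }

func (c *container) Name() string { return c.name }

func loadAllInternal(records []dbInstance) ([]Instance, error) {
	instances := []Instance{}
	for _, rec := range records {
		if rec.Type == TypeContainer {
			instances = append(instances, &container{name: rec.Name})
			continue
		}
		// Virtual machine loading is not implemented yet, so skip it.
	}
	return instances, nil
}

func main() {
	out, _ := loadAllInternal([]dbInstance{{"c1", TypeContainer}, {"vm1", TypeVM}})
	fmt.Println(len(out)) // 1: only the container record is loaded
}
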
diff --git a/lxd/container_lxc.go b/lxd/container_lxc.go
index e48c4f1a86..93b7beaf06 100644
--- a/lxd/container_lxc.go
+++ b/lxd/container_lxc.go
@@ -867,7 +867,7 @@ func findIdmap(state *state.State, cName string, isolatedStr string, configBase
idmapLock.Lock()
defer idmapLock.Unlock()
- cts, err := containerLoadAll(state)
+ cts, err := instanceLoadAll(state)
if err != nil {
return nil, 0, err
}
@@ -3391,7 +3391,7 @@ func (c *containerLXC) Snapshots() ([]Instance, error) {
}
// Build the snapshot list
- containers, err := containerLoadAllInternal(snaps, c.state)
+ containers, err := instanceLoadAllInternal(snaps, c.state)
if err != nil {
return nil, err
}
diff --git a/lxd/containers.go b/lxd/containers.go
index 8272242d6b..a340642a00 100644
--- a/lxd/containers.go
+++ b/lxd/containers.go
@@ -141,7 +141,7 @@ var instanceBackupExportCmd = APIEndpoint{
Get: APIEndpointAction{Handler: containerBackupExportGet, AccessHandler: AllowProjectPermission("containers", "view")},
}
-type containerAutostartList []container
+type containerAutostartList []Instance
func (slice containerAutostartList) Len() int {
return len(slice)
@@ -165,22 +165,22 @@ func (slice containerAutostartList) Swap(i, j int) {
}
func containersRestart(s *state.State) error {
- // Get all the containers
- result, err := containerLoadNodeAll(s)
+ // Get all the instances
+ result, err := instanceLoadNodeAll(s)
if err != nil {
return err
}
- containers := []container{}
+ instances := []Instance{}
for _, c := range result {
- containers = append(containers, c)
+ instances = append(instances, c)
}
- sort.Sort(containerAutostartList(containers))
+ sort.Sort(containerAutostartList(instances))
- // Restart the containers
- for _, c := range containers {
+ // Restart the instances
+ for _, c := range instances {
config := c.ExpandedConfig()
lastState := config["volatile.last_state.power"]
@@ -207,7 +207,7 @@ func containersRestart(s *state.State) error {
return nil
}
-type containerStopList []container
+type containerStopList []Instance
func (slice containerStopList) Len() int {
return len(slice)
@@ -263,12 +263,12 @@ func containersShutdown(s *state.State) error {
dbAvailable := true
- // Get all the containers
- containers, err := containerLoadNodeAll(s)
+ // Get all the instances
+ instances, err := instanceLoadNodeAll(s)
if err != nil {
// Mark database as offline
dbAvailable = false
- containers = []container{}
+ instances = []Instance{}
// List all containers on disk
cnames, err := containersOnDisk()
@@ -287,12 +287,12 @@ func containersShutdown(s *state.State) error {
return err
}
- containers = append(containers, c)
+ instances = append(instances, c)
}
}
}
- sort.Sort(containerStopList(containers))
+ sort.Sort(containerStopList(instances))
if dbAvailable {
// Reset all container states
@@ -304,18 +304,18 @@ func containersShutdown(s *state.State) error {
var lastPriority int
- if len(containers) != 0 {
- lastPriority, _ = strconv.Atoi(containers[0].ExpandedConfig()["boot.stop.priority"])
+ if len(instances) != 0 {
+ lastPriority, _ = strconv.Atoi(instances[0].ExpandedConfig()["boot.stop.priority"])
}
- for _, c := range containers {
+ for _, c := range instances {
priority, _ := strconv.Atoi(c.ExpandedConfig()["boot.stop.priority"])
// Enforce shutdown priority
if priority != lastPriority {
lastPriority = priority
- // Wait for containers with higher priority to finish
+ // Wait for instances with higher priority to finish
wg.Wait()
}
@@ -324,7 +324,7 @@ func containersShutdown(s *state.State) error {
// Stop the container
if lastState != "BROKEN" && lastState != "STOPPED" {
- // Determinate how long to wait for the container to shutdown cleanly
+ // Determine how long to wait for the instance to shut down cleanly
var timeoutSeconds int
value, ok := c.ExpandedConfig()["boot.host_shutdown_timeout"]
if ok {
@@ -333,9 +333,9 @@ func containersShutdown(s *state.State) error {
timeoutSeconds = 30
}
- // Stop the container
+ // Stop the instance
wg.Add(1)
- go func(c container, lastState string) {
+ go func(c Instance, lastState string) {
c.Shutdown(time.Second * time.Duration(timeoutSeconds))
c.Stop(false)
c.VolatileSet(map[string]string{"volatile.last_state.power": lastState})
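
containersShutdown keeps its existing behaviour, just over []Instance: instances are sorted by boot.stop.priority and stopped concurrently within a priority band, with a WaitGroup drain before the next band starts. A stripped-down sketch of that pattern, assuming a toy stopper type in place of the Instance interface:

package main

import (
	"fmt"
	"sync"
	"time"
)

// stopper is an illustrative stand-in for an Instance with a stop priority.
type stopper struct {
	name     string
	priority int
}

func (s stopper) Shutdown(timeout time.Duration) {
	fmt.Println("stopping", s.name)
}

// shutdownAll expects the slice already sorted by priority, like the real
// containerStopList sort.
func shutdownAll(instances []stopper) {
	var wg sync.WaitGroup

	lastPriority := 0
	if len(instances) != 0 {
		lastPriority = instances[0].priority
	}

	for _, inst := range instances {
		if inst.priority != lastPriority {
			lastPriority = inst.priority
			// Wait for the previous priority band to finish before continuing.
			wg.Wait()
		}

		wg.Add(1)
		go func(inst stopper) {
			defer wg.Done()
			inst.Shutdown(30 * time.Second)
		}(inst)
	}
	wg.Wait()
}

func main() {
	shutdownAll([]stopper{{"db", 10}, {"cache", 10}, {"web", 0}})
}
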
diff --git a/lxd/containers_get.go b/lxd/containers_get.go
index 8d42f15da3..2d4d147920 100644
--- a/lxd/containers_get.go
+++ b/lxd/containers_get.go
@@ -103,10 +103,10 @@ func doContainersGet(d *Daemon, r *http.Request) (interface{}, error) {
return []string{}, err
}
- // Get the local containers
- nodeCts := map[string]container{}
+ // Get the local instances
+ nodeCts := map[string]Instance{}
if recursion > 0 {
- cts, err := containerLoadNodeProjectAll(d.State(), project, instanceType)
+ cts, err := instanceLoadNodeProjectAll(d.State(), project, instanceType)
if err != nil {
return nil, err
}
diff --git a/lxd/daemon.go b/lxd/daemon.go
index 583af879ad..d9a0f1d3a7 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -992,14 +992,14 @@ func (d *Daemon) Ready() error {
}
func (d *Daemon) numRunningContainers() (int, error) {
- results, err := containerLoadNodeAll(d.State())
+ results, err := instanceLoadNodeAll(d.State())
if err != nil {
return 0, err
}
count := 0
- for _, container := range results {
- if container.IsRunning() {
+ for _, instance := range results {
+ if instance.IsRunning() {
count = count + 1
}
}
diff --git a/lxd/devices.go b/lxd/devices.go
index da7c13d42c..aeecf4a69a 100644
--- a/lxd/devices.go
+++ b/lxd/devices.go
@@ -293,16 +293,16 @@ func deviceTaskBalance(s *state.State) {
return
}
- // Iterate through the containers
- containers, err := containerLoadNodeAll(s)
+ // Iterate through the instances
+ instances, err := instanceLoadNodeAll(s)
if err != nil {
- logger.Error("Problem loading containers list", log.Ctx{"err": err})
+ logger.Error("Problem loading instances list", log.Ctx{"err": err})
return
}
- fixedContainers := map[int][]container{}
- balancedContainers := map[container]int{}
- for _, c := range containers {
+ fixedInstances := map[int][]Instance{}
+ balancedInstances := map[Instance]int{}
+ for _, c := range instances {
conf := c.ExpandedConfig()
cpulimit, ok := conf["limits.cpu"]
if !ok || cpulimit == "" {
@@ -317,7 +317,7 @@ func deviceTaskBalance(s *state.State) {
if err == nil {
// Load-balance
count = min(count, len(cpus))
- balancedContainers[c] = count
+ balancedInstances[c] = count
} else {
// Pinned
containerCpus, err := parseCpuset(cpulimit)
@@ -329,18 +329,18 @@ func deviceTaskBalance(s *state.State) {
continue
}
- _, ok := fixedContainers[nr]
+ _, ok := fixedInstances[nr]
if ok {
- fixedContainers[nr] = append(fixedContainers[nr], c)
+ fixedInstances[nr] = append(fixedInstances[nr], c)
} else {
- fixedContainers[nr] = []container{c}
+ fixedInstances[nr] = []Instance{c}
}
}
}
}
// Balance things
- pinning := map[container][]string{}
+ pinning := map[Instance][]string{}
usage := map[int]deviceTaskCPU{}
for _, id := range cpus {
@@ -353,7 +353,7 @@ func deviceTaskBalance(s *state.State) {
usage[id] = cpu
}
- for cpu, ctns := range fixedContainers {
+ for cpu, ctns := range fixedInstances {
c, ok := usage[cpu]
if !ok {
logger.Errorf("Internal error: container using unavailable cpu")
@@ -376,7 +376,7 @@ func deviceTaskBalance(s *state.State) {
sortedUsage = append(sortedUsage, value)
}
- for ctn, count := range balancedContainers {
+ for ctn, count := range balancedInstances {
sort.Sort(sortedUsage)
for _, cpu := range sortedUsage {
if count == 0 {
@@ -416,12 +416,12 @@ func deviceNetworkPriority(s *state.State, netif string) {
return
}
- containers, err := containerLoadNodeAll(s)
+ instances, err := instanceLoadNodeAll(s)
if err != nil {
return
}
- for _, c := range containers {
+ for _, c := range instances {
// Extract the current priority
networkPriority := c.ExpandedConfig()["limits.network.priority"]
if networkPriority == "" {
@@ -494,16 +494,16 @@ func deviceEventListener(s *state.State) {
// devicesRegister calls the Register() function on all supported devices so they receive events.
func devicesRegister(s *state.State) {
- containers, err := containerLoadNodeAll(s)
+ instances, err := instanceLoadNodeAll(s)
if err != nil {
logger.Error("Problem loading containers list", log.Ctx{"err": err})
return
}
- for _, containerIf := range containers {
- c, ok := containerIf.(*containerLXC)
+ for _, instanceIf := range instances {
+ c, ok := instanceIf.(*containerLXC)
if !ok {
- logger.Errorf("Got non-LXC container")
+ logger.Errorf("Instance is not container type")
continue
}
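
deviceTaskBalance splits instances into two groups based on their limits.cpu value: a plain number means "balance over N CPUs", anything else is parsed as an explicit CPU set and pinned. A simplified sketch of that partitioning (the inst type and the toy parseCpuset are illustrative; the real parseCpuset also handles ranges, and the real maps are keyed on Instance values):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

type inst struct {
	name     string
	cpuLimit string // value of limits.cpu
}

// parseCpuset is a toy version that only handles comma-separated CPU ids.
func parseCpuset(set string) ([]int, error) {
	out := []int{}
	for _, part := range strings.Split(set, ",") {
		n, err := strconv.Atoi(strings.TrimSpace(part))
		if err != nil {
			return nil, err
		}
		out = append(out, n)
	}
	return out, nil
}

func main() {
	instances := []inst{{"web", "2"}, {"db", "0,1"}}

	fixed := map[int][]inst{}    // CPU id -> instances pinned to it
	balanced := map[string]int{} // instance name -> CPU count to balance over

	for _, c := range instances {
		if count, err := strconv.Atoi(c.cpuLimit); err == nil {
			// Numeric limit: load-balance over that many CPUs.
			balanced[c.name] = count
			continue
		}
		// Otherwise treat the limit as an explicit pinning set.
		cpus, err := parseCpuset(c.cpuLimit)
		if err != nil {
			continue
		}
		for _, nr := range cpus {
			fixed[nr] = append(fixed[nr], c)
		}
	}

	fmt.Println(balanced, len(fixed))
}
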
diff --git a/lxd/devlxd.go b/lxd/devlxd.go
index 3c76c0b030..7ceeb96685 100644
--- a/lxd/devlxd.go
+++ b/lxd/devlxd.go
@@ -483,24 +483,28 @@ func findContainerForPid(pid int32, d *Daemon) (container, error) {
return nil, err
}
- containers, err := containerLoadNodeAll(d.State())
+ instances, err := instanceLoadNodeAll(d.State())
if err != nil {
return nil, err
}
- for _, c := range containers {
- if !c.IsRunning() {
+ for _, inst := range instances {
+ if inst.Type() != instance.TypeContainer {
continue
}
- initpid := c.InitPID()
+ if !inst.IsRunning() {
+ continue
+ }
+
+ initpid := inst.InitPID()
pidNs, err := os.Readlink(fmt.Sprintf("/proc/%d/ns/pid", initpid))
if err != nil {
return nil, err
}
if origPidNs == pidNs {
- return c, nil
+ return inst.(container), nil
}
}
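
findContainerForPid matches the caller against running containers by comparing PID namespaces: two processes share a namespace exactly when /proc/<pid>/ns/pid resolves to the same link target, which is why the code readlinks the instance's init PID. A small illustrative helper (Linux-only; for simplicity it only compares the current process against itself):

package main

import (
	"fmt"
	"os"
)

// samePidNamespace reports whether two PIDs are in the same PID namespace by
// comparing their /proc/<pid>/ns/pid link targets.
func samePidNamespace(pidA, pidB int) (bool, error) {
	nsA, err := os.Readlink(fmt.Sprintf("/proc/%d/ns/pid", pidA))
	if err != nil {
		return false, err
	}
	nsB, err := os.Readlink(fmt.Sprintf("/proc/%d/ns/pid", pidB))
	if err != nil {
		return false, err
	}
	return nsA == nsB, nil
}

func main() {
	self := os.Getpid()
	same, err := samePidNamespace(self, self)
	if err != nil {
		fmt.Println("error:", err) // /proc may be unavailable outside Linux
		return
	}
	fmt.Println("same namespace:", same)
}
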
diff --git a/lxd/networks.go b/lxd/networks.go
index 1e7607231d..11e6b2b699 100644
--- a/lxd/networks.go
+++ b/lxd/networks.go
@@ -22,6 +22,7 @@ import (
"github.com/lxc/lxd/lxd/db"
"github.com/lxc/lxd/lxd/device"
"github.com/lxc/lxd/lxd/dnsmasq"
+ "github.com/lxc/lxd/lxd/instance"
"github.com/lxc/lxd/lxd/iptables"
"github.com/lxc/lxd/lxd/node"
"github.com/lxc/lxd/lxd/state"
@@ -429,16 +430,16 @@ func doNetworkGet(d *Daemon, name string) (api.Network, error) {
// Look for containers using the interface
if n.Type != "loopback" {
- cts, err := containerLoadFromAllProjects(d.State())
+ insts, err := instanceLoadFromAllProjects(d.State())
if err != nil {
return api.Network{}, err
}
- for _, c := range cts {
- if networkIsInUse(c, n.Name) {
- uri := fmt.Sprintf("/%s/containers/%s", version.APIVersion, c.Name())
- if c.Project() != "default" {
- uri += fmt.Sprintf("?project=%s", c.Project())
+ for _, inst := range insts {
+ if networkIsInUse(inst, n.Name) {
+ uri := fmt.Sprintf("/%s/containers/%s", version.APIVersion, inst.Name())
+ if inst.Project() != "default" {
+ uri += fmt.Sprintf("?project=%s", inst.Project())
}
n.UsedBy = append(n.UsedBy, uri)
}
@@ -712,24 +713,26 @@ func networkLeasesGet(d *Daemon, r *http.Request) Response {
// Get all static leases
if !isClusterNotification(r) {
- // Get all the containers
- containers, err := containerLoadByProject(d.State(), project)
+ // Get all the instances
+ instances, err := instanceLoadByProject(d.State(), project)
if err != nil {
return SmartError(err)
}
- for _, c := range containers {
+ for _, inst := range instances {
// Go through all its devices (including profiles
- for k, d := range c.ExpandedDevices() {
+ for k, d := range inst.ExpandedDevices() {
// Skip uninteresting entries
if d["type"] != "nic" || d["nictype"] != "bridged" || d["parent"] != name {
continue
}
// Fill in the hwaddr from volatile
- d, err = c.(*containerLXC).fillNetworkDevice(k, d)
- if err != nil {
- continue
+ if inst.Type() == instance.TypeContainer {
+ d, err = inst.(*containerLXC).fillNetworkDevice(k, d)
+ if err != nil {
+ continue
+ }
}
// Record the MAC
@@ -740,21 +743,21 @@ func networkLeasesGet(d *Daemon, r *http.Request) Response {
// Add the lease
if d["ipv4.address"] != "" {
leases = append(leases, api.NetworkLease{
- Hostname: c.Name(),
+ Hostname: inst.Name(),
Address: d["ipv4.address"],
Hwaddr: d["hwaddr"],
Type: "static",
- Location: c.Location(),
+ Location: inst.Location(),
})
}
if d["ipv6.address"] != "" {
leases = append(leases, api.NetworkLease{
- Hostname: c.Name(),
+ Hostname: inst.Name(),
Address: d["ipv6.address"],
Hwaddr: d["hwaddr"],
Type: "static",
- Location: c.Location(),
+ Location: inst.Location(),
})
}
}
@@ -956,14 +959,14 @@ func (n *network) IsRunning() bool {
}
func (n *network) IsUsed() bool {
- // Look for containers using the interface
- cts, err := containerLoadFromAllProjects(n.state)
+ // Look for instances using the interface
+ insts, err := instanceLoadFromAllProjects(n.state)
if err != nil {
return true
}
- for _, c := range cts {
- if networkIsInUse(c, n.name) {
+ for _, inst := range insts {
+ if networkIsInUse(inst, n.name) {
return true
}
}
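
Both the lease path above and the static-config path in networks_utils.go now guard the *containerLXC assertion with an instance type check, so the LXC-specific fillNetworkDevice call is only made for containers and other instance types keep their device config untouched. A compact sketch of that guard, with illustrative types and a made-up hwaddr:

package main

import "fmt"

type Instance interface {
	Type() string
}

type containerLXC struct{}

func (c *containerLXC) Type() string { return "container" }

// fillNetworkDevice stands in for the LXC-only volatile hwaddr lookup.
func (c *containerLXC) fillNetworkDevice(dev map[string]string) map[string]string {
	out := map[string]string{}
	for k, v := range dev {
		out[k] = v
	}
	out["hwaddr"] = "00:16:3e:00:00:01"
	return out
}

type vm struct{}

func (v *vm) Type() string { return "virtual-machine" }

// hwaddrFor only applies the container-specific fill-in when the instance
// really is a container; other types get the device config back unchanged.
func hwaddrFor(inst Instance, dev map[string]string) map[string]string {
	if inst.Type() == "container" {
		if c, ok := inst.(*containerLXC); ok {
			return c.fillNetworkDevice(dev)
		}
	}
	return dev
}

func main() {
	dev := map[string]string{"type": "nic", "nictype": "bridged"}
	fmt.Println(hwaddrFor(&containerLXC{}, dev)["hwaddr"]) // filled in
	fmt.Println(hwaddrFor(&vm{}, dev)["hwaddr"])           // left empty
}
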
diff --git a/lxd/networks_utils.go b/lxd/networks_utils.go
index 299d65c8cd..f6a939aef2 100644
--- a/lxd/networks_utils.go
+++ b/lxd/networks_utils.go
@@ -25,6 +25,7 @@ import (
"github.com/lxc/lxd/lxd/db"
"github.com/lxc/lxd/lxd/device"
"github.com/lxc/lxd/lxd/dnsmasq"
+ "github.com/lxc/lxd/lxd/instance"
"github.com/lxc/lxd/lxd/project"
"github.com/lxc/lxd/lxd/state"
"github.com/lxc/lxd/shared"
@@ -89,7 +90,7 @@ func networkGetInterfaces(cluster *db.Cluster) ([]string, error) {
return networks, nil
}
-func networkIsInUse(c container, name string) bool {
+func networkIsInUse(c Instance, name string) bool {
for _, d := range c.ExpandedDevices() {
if d["type"] != "nic" {
continue
@@ -637,26 +638,28 @@ func networkUpdateStatic(s *state.State, networkName string) error {
networks = []string{networkName}
}
- // Get all the containers
- containers, err := containerLoadNodeAll(s)
+ // Get all the instances
+ insts, err := instanceLoadNodeAll(s)
if err != nil {
return err
}
// Build a list of dhcp host entries
entries := map[string][][]string{}
- for _, c := range containers {
+ for _, inst := range insts {
// Go through all its devices (including profiles
- for k, d := range c.ExpandedDevices() {
+ for k, d := range inst.ExpandedDevices() {
// Skip uninteresting entries
if d["type"] != "nic" || d["nictype"] != "bridged" || !shared.StringInSlice(d["parent"], networks) {
continue
}
- // Fill in the hwaddr from volatile
- d, err = c.(*containerLXC).fillNetworkDevice(k, d)
- if err != nil {
- continue
+ if inst.Type() == instance.TypeContainer {
+ // Fill in the hwaddr from volatile
+ d, err = inst.(*containerLXC).fillNetworkDevice(k, d)
+ if err != nil {
+ continue
+ }
}
// Add the new host entries
@@ -666,7 +669,7 @@ func networkUpdateStatic(s *state.State, networkName string) error {
}
if (shared.IsTrue(d["security.ipv4_filtering"]) && d["ipv4.address"] == "") || (shared.IsTrue(d["security.ipv6_filtering"]) && d["ipv6.address"] == "") {
- curIPv4, curIPv6, err := dnsmasq.DHCPStaticIPs(d["parent"], c.Name())
+ curIPv4, curIPv6, err := dnsmasq.DHCPStaticIPs(d["parent"], inst.Name())
if err != nil && !os.IsNotExist(err) {
return err
}
@@ -680,7 +683,7 @@ func networkUpdateStatic(s *state.State, networkName string) error {
}
}
- entries[d["parent"]] = append(entries[d["parent"]], []string{d["hwaddr"], c.Project(), c.Name(), d["ipv4.address"], d["ipv6.address"]})
+ entries[d["parent"]] = append(entries[d["parent"]], []string{d["hwaddr"], inst.Project(), inst.Name(), d["ipv4.address"], d["ipv6.address"]})
}
}
diff --git a/lxd/storage_volumes_utils.go b/lxd/storage_volumes_utils.go
index 2c2f3d63d7..b25a1288d9 100644
--- a/lxd/storage_volumes_utils.go
+++ b/lxd/storage_volumes_utils.go
@@ -238,20 +238,20 @@ func storagePoolVolumeSnapshotUpdate(state *state.State, poolName string, volume
}
func storagePoolVolumeUsedByContainersGet(s *state.State, project, poolName string, volumeName string) ([]string, error) {
- cts, err := containerLoadByProject(s, project)
+ insts, err := instanceLoadByProject(s, project)
if err != nil {
return []string{}, err
}
ctsUsingVolume := []string{}
- for _, c := range cts {
- for _, dev := range c.LocalDevices() {
+ for _, inst := range insts {
+ for _, dev := range inst.LocalDevices() {
if dev["type"] != "disk" {
continue
}
if dev["pool"] == poolName && dev["source"] == volumeName {
- ctsUsingVolume = append(ctsUsingVolume, c.Name())
+ ctsUsingVolume = append(ctsUsingVolume, inst.Name())
break
}
}
@@ -264,14 +264,14 @@ func storagePoolVolumeUpdateUsers(d *Daemon, oldPoolName string,
oldVolumeName string, newPoolName string, newVolumeName string) error {
s := d.State()
- // update all containers
- cts, err := containerLoadAll(s)
+ // update all instances
+ insts, err := instanceLoadAll(s)
if err != nil {
return err
}
- for _, c := range cts {
- devices := c.LocalDevices()
+ for _, inst := range insts {
+ devices := inst.LocalDevices()
for k := range devices {
if devices[k]["type"] != "disk" {
continue
@@ -298,7 +298,6 @@ func storagePoolVolumeUpdateUsers(d *Daemon, oldPoolName string,
}
// found entry
-
if oldPoolName != newPoolName {
devices[k]["pool"] = newPoolName
}
@@ -313,18 +312,18 @@ func storagePoolVolumeUpdateUsers(d *Daemon, oldPoolName string,
}
args := db.ContainerArgs{
- Architecture: c.Architecture(),
- Description: c.Description(),
- Config: c.LocalConfig(),
+ Architecture: inst.Architecture(),
+ Description: inst.Description(),
+ Config: inst.LocalConfig(),
Devices: devices,
- Ephemeral: c.IsEphemeral(),
- Profiles: c.Profiles(),
- Project: c.Project(),
- Type: c.Type(),
- Snapshot: c.IsSnapshot(),
+ Ephemeral: inst.IsEphemeral(),
+ Profiles: inst.Profiles(),
+ Project: inst.Project(),
+ Type: inst.Type(),
+ Snapshot: inst.IsSnapshot(),
}
- err = c.Update(args, false)
+ err = inst.Update(args, false)
if err != nil {
return err
}
@@ -398,19 +397,19 @@ func storagePoolVolumeUpdateUsers(d *Daemon, oldPoolName string,
func storagePoolVolumeUsedByRunningContainersWithProfilesGet(s *state.State,
poolName string, volumeName string, volumeTypeName string,
runningOnly bool) ([]string, error) {
- cts, err := containerLoadAll(s)
+ insts, err := instanceLoadAll(s)
if err != nil {
return []string{}, err
}
ctsUsingVolume := []string{}
volumeNameWithType := fmt.Sprintf("%s/%s", volumeTypeName, volumeName)
- for _, c := range cts {
- if runningOnly && !c.IsRunning() {
+ for _, inst := range insts {
+ if runningOnly && !inst.IsRunning() {
continue
}
- for _, dev := range c.ExpandedDevices() {
+ for _, dev := range inst.ExpandedDevices() {
if dev["type"] != "disk" {
continue
}
@@ -423,7 +422,7 @@ func storagePoolVolumeUsedByRunningContainersWithProfilesGet(s *state.State,
// "container////bla" but only against "container/bla".
cleanSource := filepath.Clean(dev["source"])
if cleanSource == volumeName || cleanSource == volumeNameWithType {
- ctsUsingVolume = append(ctsUsingVolume, c.Name())
+ ctsUsingVolume = append(ctsUsingVolume, inst.Name())
}
}
}
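
The storage helpers decide whether an instance uses a volume by scanning its disk devices and comparing the filepath.Clean'd source against the volume name, so a source like "custom////data" matches "custom/data". A small sketch of that matching, using made-up device data:

package main

import (
	"fmt"
	"path/filepath"
)

// usesVolume reports whether any disk device points at the given volume,
// matching either the bare name or the type-prefixed name.
func usesVolume(devices []map[string]string, volumeName, volumeNameWithType string) bool {
	for _, dev := range devices {
		if dev["type"] != "disk" {
			continue
		}
		// Normalise the source so duplicated slashes compare equal.
		cleanSource := filepath.Clean(dev["source"])
		if cleanSource == volumeName || cleanSource == volumeNameWithType {
			return true
		}
	}
	return false
}

func main() {
	devs := []map[string]string{
		{"type": "disk", "source": "custom////data"},
	}
	fmt.Println(usesVolume(devs, "data", "custom/data")) // true
}
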