[lxc-devel] [lxd/master] lxd: Rename container to instance
stgraber on Github
lxc-bot at linuxcontainers.org
Sat Mar 7 18:40:21 UTC 2020
A non-text attachment was scrubbed...
Name: not available
Type: text/x-mailbox
Size: 301 bytes
Desc: not available
URL: <http://lists.linuxcontainers.org/pipermail/lxc-devel/attachments/20200307/b00b257a/attachment-0001.bin>
-------------- next part --------------
From fec5c7bcd5a9ad0bf7fbaf22fceb28e48a4e43a6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?St=C3=A9phane=20Graber?= <stgraber at ubuntu.com>
Date: Sat, 7 Mar 2020 16:21:42 +0100
Subject: [PATCH 1/2] lxd: Cleanup error messages
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Signed-off-by: Stéphane Graber <stgraber at ubuntu.com>
---
lxd/api_internal.go | 2 +-
lxd/backup.go | 6 ++---
lxd/container_console.go | 4 ++--
lxd/container_delete.go | 2 +-
lxd/container_exec.go | 4 ++--
lxd/container_post.go | 40 ++++++++++++++++-----------------
lxd/container_state.go | 6 ++---
lxd/containers.go | 2 +-
lxd/containers_post.go | 16 ++++++-------
lxd/db/containers.go | 41 +++++++++++++++++++++-------------
lxd/db/errors.go | 2 +-
lxd/db/node.go | 2 +-
lxd/db/storage_volumes.go | 2 +-
lxd/device/disk.go | 2 +-
lxd/instance/instance_utils.go | 10 ++++-----
lxd/maas/controller.go | 2 +-
lxd/main_forkexec.go | 2 +-
lxd/profiles_utils.go | 12 +++++-----
lxd/storage/backend_lxd.go | 2 +-
lxd/storage_volumes.go | 2 +-
lxd/template/chroot.go | 2 +-
21 files changed, 87 insertions(+), 76 deletions(-)
diff --git a/lxd/api_internal.go b/lxd/api_internal.go
index 78d09e9eb9..1cbdcd79bc 100644
--- a/lxd/api_internal.go
+++ b/lxd/api_internal.go
@@ -626,7 +626,7 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
Stateful: backupConf.Container.Stateful,
})
if err != nil {
- err = errors.Wrap(err, "Create container")
+ err = errors.Wrap(err, "Create instance")
return response.SmartError(err)
}
diff --git a/lxd/backup.go b/lxd/backup.go
index 8aa9e904dc..2b10e87f67 100644
--- a/lxd/backup.go
+++ b/lxd/backup.go
@@ -261,18 +261,18 @@ func pruneExpiredContainerBackups(ctx context.Context, d *Daemon) error {
// Get the list of expired backups.
backups, err := d.cluster.ContainerBackupsGetExpired()
if err != nil {
- return errors.Wrap(err, "Unable to retrieve the list of expired container backups")
+ return errors.Wrap(err, "Unable to retrieve the list of expired instance backups")
}
for _, b := range backups {
inst, err := instance.LoadByID(d.State(), b.InstanceID)
if err != nil {
- return errors.Wrapf(err, "Error deleting container backup %s", b.Name)
+ return errors.Wrapf(err, "Error deleting instance backup %s", b.Name)
}
err = backup.DoBackupDelete(d.State(), inst.Project(), b.Name, inst.Name())
if err != nil {
- return errors.Wrapf(err, "Error deleting container backup %s", b.Name)
+ return errors.Wrapf(err, "Error deleting instance backup %s", b.Name)
}
}
diff --git a/lxd/container_console.go b/lxd/container_console.go
index 3a40c3525e..6c20df161a 100644
--- a/lxd/container_console.go
+++ b/lxd/container_console.go
@@ -282,12 +282,12 @@ func containerConsolePost(d *Daemon, r *http.Request) response.Response {
return response.SmartError(err)
}
- err = fmt.Errorf("Container is not running")
+ err = fmt.Errorf("Instance is not running")
if !inst.IsRunning() {
return response.BadRequest(err)
}
- err = fmt.Errorf("Container is frozen")
+ err = fmt.Errorf("Instance is frozen")
if inst.IsFrozen() {
return response.BadRequest(err)
}
diff --git a/lxd/container_delete.go b/lxd/container_delete.go
index de89f8c928..4b6c022998 100644
--- a/lxd/container_delete.go
+++ b/lxd/container_delete.go
@@ -34,7 +34,7 @@ func containerDelete(d *Daemon, r *http.Request) response.Response {
}
if c.IsRunning() {
- return response.BadRequest(fmt.Errorf("container is running"))
+ return response.BadRequest(fmt.Errorf("Instance is running"))
}
rmct := func(op *operations.Operation) error {
diff --git a/lxd/container_exec.go b/lxd/container_exec.go
index c15c4afe90..66b95f19fd 100644
--- a/lxd/container_exec.go
+++ b/lxd/container_exec.go
@@ -369,11 +369,11 @@ func containerExecPost(d *Daemon, r *http.Request) response.Response {
}
if !inst.IsRunning() {
- return response.BadRequest(fmt.Errorf("Container is not running"))
+ return response.BadRequest(fmt.Errorf("Instance is not running"))
}
if inst.IsFrozen() {
- return response.BadRequest(fmt.Errorf("Container is frozen"))
+ return response.BadRequest(fmt.Errorf("Instance is frozen"))
}
// Process environment.
diff --git a/lxd/container_post.go b/lxd/container_post.go
index 7387fdf101..069c3ba0ef 100644
--- a/lxd/container_post.go
+++ b/lxd/container_post.go
@@ -83,7 +83,7 @@ func containerPost(d *Daemon, r *http.Request) response.Response {
// Load source node.
address, err := tx.ContainerNodeAddress(project, name, instanceType)
if err != nil {
- return errors.Wrap(err, "Failed to get address of container's node")
+ return errors.Wrap(err, "Failed to get address of instance's node")
}
if address == "" {
// Local node.
@@ -181,12 +181,12 @@ func containerPost(d *Daemon, r *http.Request) response.Response {
// Check if we are migrating a ceph-based container.
poolName, err := d.cluster.InstancePool(project, name)
if err != nil {
- err = errors.Wrap(err, "Failed to fetch container's pool name")
+ err = errors.Wrap(err, "Failed to fetch instance's pool name")
return response.SmartError(err)
}
_, pool, err := d.cluster.StoragePoolGet(poolName)
if err != nil {
- err = errors.Wrap(err, "Failed to fetch container's pool info")
+ err = errors.Wrap(err, "Failed to fetch instance's pool info")
return response.SmartError(err)
}
if pool.Driver == "ceph" {
@@ -198,7 +198,7 @@ func containerPost(d *Daemon, r *http.Request) response.Response {
// here only to handle the case where the container is
// ceph-based.
if sourceNodeOffline {
- err := fmt.Errorf("The cluster member hosting the container is offline")
+ err := fmt.Errorf("The cluster member hosting the instance is offline")
return response.SmartError(err)
}
@@ -324,7 +324,7 @@ func containerPostClusteringMigrate(d *Daemon, c instance.Instance, oldName, new
// First make a copy on the new node of the container to be moved.
entry, _, err := source.GetContainer(oldName)
if err != nil {
- return errors.Wrap(err, "Failed to get container info")
+ return errors.Wrap(err, "Failed to get instance info")
}
args := lxd.ContainerCopyArgs{
@@ -334,23 +334,23 @@ func containerPostClusteringMigrate(d *Daemon, c instance.Instance, oldName, new
copyOp, err := dest.CopyContainer(source, *entry, &args)
if err != nil {
- return errors.Wrap(err, "Failed to issue copy container API request")
+ return errors.Wrap(err, "Failed to issue copy instance API request")
}
err = copyOp.Wait()
if err != nil {
- return errors.Wrap(err, "Copy container operation failed")
+ return errors.Wrap(err, "Copy instance operation failed")
}
// Delete the container on the original node.
deleteOp, err := source.DeleteContainer(oldName)
if err != nil {
- return errors.Wrap(err, "Failed to issue delete container API request")
+ return errors.Wrap(err, "Failed to issue delete instance API request")
}
err = deleteOp.Wait()
if err != nil {
- return errors.Wrap(err, "Delete container operation failed")
+ return errors.Wrap(err, "Delete instance operation failed")
}
// If the destination name is not set, we have generated a random name for
@@ -362,12 +362,12 @@ func containerPostClusteringMigrate(d *Daemon, c instance.Instance, oldName, new
op, err := dest.RenameInstance(destName, instancePost)
if err != nil {
- return errors.Wrap(err, "Failed to issue rename container API request")
+ return errors.Wrap(err, "Failed to issue rename instance API request")
}
err = op.Wait()
if err != nil {
- return errors.Wrap(err, "Rename container operation failed")
+ return errors.Wrap(err, "Rename instance operation failed")
}
destName = oldName
}
@@ -375,7 +375,7 @@ func containerPostClusteringMigrate(d *Daemon, c instance.Instance, oldName, new
// Restore the original value of "volatile.apply_template"
id, err := d.cluster.ContainerID(c.Project(), destName)
if err != nil {
- return errors.Wrap(err, "Failed to get ID of moved container")
+ return errors.Wrap(err, "Failed to get ID of moved instance")
}
err = d.cluster.ContainerConfigRemove(id, "volatile.apply_template")
@@ -416,16 +416,16 @@ func containerPostClusteringMigrateWithCeph(d *Daemon, c instance.Instance, proj
logger.Debugf(`Renaming RBD storage volume for source container "%s" from "%s" to "%s"`, c.Name(), c.Name(), newName)
poolName, err := c.StoragePool()
if err != nil {
- return errors.Wrap(err, "Failed to get source container's storage pool name")
+ return errors.Wrap(err, "Failed to get source instance's storage pool name")
}
pool, err := driver.GetPoolByName(d.State(), poolName)
if err != nil {
- return errors.Wrap(err, "Failed to get source container's storage pool")
+ return errors.Wrap(err, "Failed to get source instance's storage pool")
}
if pool.Driver().Info().Name != "ceph" {
- return fmt.Errorf("Source container's storage pool is not of type ceph")
+ return fmt.Errorf("Source instance's storage pool is not of type ceph")
}
args := migration.VolumeSourceArgs{
@@ -449,7 +449,7 @@ func containerPostClusteringMigrateWithCeph(d *Daemon, c instance.Instance, proj
return nil
})
if err != nil {
- return errors.Wrap(err, "Failed to relink container database data")
+ return errors.Wrap(err, "Failed to relink instance database data")
}
// Create the container mount point on the target node
@@ -506,21 +506,21 @@ func internalClusterContainerMovedPost(d *Daemon, r *http.Request) response.Resp
func containerPostCreateContainerMountPoint(d *Daemon, project, containerName string) error {
c, err := instance.LoadByProjectAndName(d.State(), project, containerName)
if err != nil {
- return errors.Wrap(err, "Failed to load moved container on target node")
+ return errors.Wrap(err, "Failed to load moved instance on target node")
}
poolName, err := c.StoragePool()
if err != nil {
- return errors.Wrap(err, "Failed get pool name of moved container on target node")
+ return errors.Wrap(err, "Failed to get pool name of moved instance on target node")
}
snapshotNames, err := d.cluster.ContainerGetSnapshots(project, containerName)
if err != nil {
- return errors.Wrap(err, "Failed to create container snapshot names")
+ return errors.Wrap(err, "Failed to get instance snapshot names")
}
containerMntPoint := driver.GetContainerMountPoint(c.Project(), poolName, containerName)
err = driver.CreateContainerMountpoint(containerMntPoint, c.Path(), c.IsPrivileged())
if err != nil {
- return errors.Wrap(err, "Failed to create container mount point on target node")
+ return errors.Wrap(err, "Failed to create instance mount point on target node")
}
for _, snapshotName := range snapshotNames {
diff --git a/lxd/container_state.go b/lxd/container_state.go
index 0870fb00e9..180d2d8dd8 100644
--- a/lxd/container_state.go
+++ b/lxd/container_state.go
@@ -174,7 +174,7 @@ func containerStatePut(d *Daemon, r *http.Request) response.Response {
}
} else {
if c.IsFrozen() {
- return fmt.Errorf("container is not running")
+ return fmt.Errorf("Instance is not running")
}
err = c.Shutdown(time.Duration(raw.Timeout) * time.Second)
@@ -192,7 +192,7 @@ func containerStatePut(d *Daemon, r *http.Request) response.Response {
}
case shared.Freeze:
if !d.os.CGInfo.Supports(cgroup.Freezer, nil) {
- return response.BadRequest(fmt.Errorf("This system doesn't support freezing containers"))
+ return response.BadRequest(fmt.Errorf("This system doesn't support freezing instances"))
}
opType = db.OperationContainerFreeze
@@ -202,7 +202,7 @@ func containerStatePut(d *Daemon, r *http.Request) response.Response {
}
case shared.Unfreeze:
if !d.os.CGInfo.Supports(cgroup.Freezer, nil) {
- return response.BadRequest(fmt.Errorf("This system doesn't support unfreezing containers"))
+ return response.BadRequest(fmt.Errorf("This system doesn't support unfreezing instances"))
}
opType = db.OperationContainerUnfreeze
diff --git a/lxd/containers.go b/lxd/containers.go
index b43f67c64c..e6169363c8 100644
--- a/lxd/containers.go
+++ b/lxd/containers.go
@@ -233,7 +233,7 @@ func containersRestart(s *state.State) error {
err = c.Start(false)
if err != nil {
- logger.Errorf("Failed to start container '%s': %v", c.Name(), err)
+ logger.Errorf("Failed to start instance '%s': %v", c.Name(), err)
}
autoStartDelayInt, err := strconv.Atoi(autoStartDelay)
diff --git a/lxd/containers_post.go b/lxd/containers_post.go
index 4836d1965a..16f36e7709 100644
--- a/lxd/containers_post.go
+++ b/lxd/containers_post.go
@@ -248,7 +248,7 @@ func createFromMigration(d *Daemon, project string, req *api.InstancesPost) resp
if err != nil {
req.Source.Refresh = false
} else if inst.IsRunning() {
- return response.BadRequest(fmt.Errorf("Cannot refresh a running container"))
+ return response.BadRequest(fmt.Errorf("Cannot refresh a running instance"))
}
}
@@ -329,7 +329,7 @@ func createFromMigration(d *Daemon, project string, req *api.InstancesPost) resp
// And finally run the migration.
err = sink.Do(d.State(), op)
if err != nil {
- return fmt.Errorf("Error transferring container data: %s", err)
+ return fmt.Errorf("Error transferring instance data: %s", err)
}
err = inst.DeferTemplateApply("copy")
@@ -364,7 +364,7 @@ func createFromMigration(d *Daemon, project string, req *api.InstancesPost) resp
func createFromCopy(d *Daemon, project string, req *api.InstancesPost) response.Response {
if req.Source.Source == "" {
- return response.BadRequest(fmt.Errorf("must specify a source container"))
+ return response.BadRequest(fmt.Errorf("Must specify a source instance"))
}
sourceProject := req.Source.Project
@@ -412,7 +412,7 @@ func createFromCopy(d *Daemon, project string, req *api.InstancesPost) response.
_, pool, err := d.cluster.StoragePoolGet(sourcePoolName)
if err != nil {
- err = errors.Wrap(err, "Failed to fetch container's pool info")
+ err = errors.Wrap(err, "Failed to fetch instance's pool info")
return response.SmartError(err)
}
@@ -482,7 +482,7 @@ func createFromCopy(d *Daemon, project string, req *api.InstancesPost) response.
if err != nil {
req.Source.Refresh = false
} else if c.IsRunning() {
- return response.BadRequest(fmt.Errorf("Cannot refresh a running container"))
+ return response.BadRequest(fmt.Errorf("Cannot refresh a running instance"))
}
}
@@ -798,7 +798,7 @@ func containersPost(d *Daemon, r *http.Request) response.Response {
}
if strings.Contains(req.Name, shared.SnapshotDelimiter) {
- return response.BadRequest(fmt.Errorf("Invalid container name: '%s' is reserved for snapshots", shared.SnapshotDelimiter))
+ return response.BadRequest(fmt.Errorf("Invalid instance name: '%s' is reserved for snapshots", shared.SnapshotDelimiter))
}
// Check that the project's limits are not violated. Also, possibly
@@ -939,7 +939,7 @@ func clusterCopyContainerInternal(d *Daemon, source instance.Instance, project s
// Load source node.
nodeAddress, err = tx.ContainerNodeAddress(project, name, source.Type())
if err != nil {
- return errors.Wrap(err, "Failed to get address of container's node")
+ return errors.Wrap(err, "Failed to get address of instance's node")
}
return nil
@@ -949,7 +949,7 @@ func clusterCopyContainerInternal(d *Daemon, source instance.Instance, project s
}
if nodeAddress == "" {
- return response.BadRequest(fmt.Errorf("The container source is currently offline"))
+ return response.BadRequest(fmt.Errorf("The source instance is currently offline"))
}
// Connect to the container source
diff --git a/lxd/db/containers.go b/lxd/db/containers.go
index 29652a84c1..47a8c95ad1 100644
--- a/lxd/db/containers.go
+++ b/lxd/db/containers.go
@@ -250,7 +250,7 @@ SELECT nodes.id, nodes.address
}
if rows.Next() {
- return "", fmt.Errorf("more than one node associated with container")
+ return "", fmt.Errorf("More than one node associated with instance")
}
err = rows.Err()
@@ -337,7 +337,7 @@ SELECT instances.name, nodes.id, nodes.address, nodes.heartbeat
func (c *ClusterTx) instanceListExpanded() ([]Instance, error) {
instances, err := c.InstanceList(InstanceFilter{})
if err != nil {
- return nil, errors.Wrap(err, "Load containers")
+ return nil, errors.Wrap(err, "Load instances")
}
projects, err := c.ProjectList(ProjectFilter{})
@@ -467,41 +467,48 @@ func (c *ClusterTx) ContainerNodeMove(project, oldName, newName, newNode string)
// volume.
poolName, err := c.InstancePool(project, oldName)
if err != nil {
- return errors.Wrap(err, "failed to get container's storage pool name")
+ return errors.Wrap(err, "Failed to get instance's storage pool name")
}
+
poolID, err := c.StoragePoolID(poolName)
if err != nil {
- return errors.Wrap(err, "failed to get container's storage pool ID")
+ return errors.Wrap(err, "Failed to get instance's storage pool ID")
}
+
poolDriver, err := c.StoragePoolDriver(poolID)
if err != nil {
- return errors.Wrap(err, "failed to get container's storage pool driver")
+ return errors.Wrap(err, "Failed to get instance's storage pool driver")
}
+
if poolDriver != "ceph" {
- return fmt.Errorf("container's storage pool is not of type ceph")
+ return fmt.Errorf("Instance's storage pool is not of type ceph")
}
// Update the name of the container and of its snapshots, and the node
// ID they are associated with.
containerID, err := c.InstanceID(project, oldName)
if err != nil {
- return errors.Wrap(err, "failed to get container's ID")
+ return errors.Wrap(err, "Failed to get instance's ID")
}
+
node, err := c.NodeByName(newNode)
if err != nil {
- return errors.Wrap(err, "failed to get new node's info")
+ return errors.Wrap(err, "Failed to get new node's info")
}
+
stmt := "UPDATE instances SET node_id=?, name=? WHERE id=?"
result, err := c.tx.Exec(stmt, node.ID, newName, containerID)
if err != nil {
- return errors.Wrap(err, "failed to update container's name and node ID")
+ return errors.Wrap(err, "Failed to update instance's name and node ID")
}
+
n, err := result.RowsAffected()
if err != nil {
- return errors.Wrap(err, "failed to get rows affected by container update")
+ return errors.Wrap(err, "Failed to get rows affected by instance update")
}
+
if n != 1 {
- return fmt.Errorf("unexpected number of updated rows in instances table: %d", n)
+ return fmt.Errorf("Unexpected number of updated rows in instances table: %d", n)
}
// No need to update storage_volumes if the name is identical
@@ -513,19 +520,22 @@ func (c *ClusterTx) ContainerNodeMove(project, oldName, newName, newNode string)
// there's a clone of the volume for each node).
count, err := c.NodesCount()
if err != nil {
- return errors.Wrap(err, "failed to get node's count")
+ return errors.Wrap(err, "Failed to get node's count")
}
+
stmt = "UPDATE storage_volumes SET name=? WHERE name=? AND storage_pool_id=? AND type=?"
result, err = c.tx.Exec(stmt, newName, oldName, poolID, StoragePoolVolumeTypeContainer)
if err != nil {
- return errors.Wrap(err, "failed to update container's volume name")
+ return errors.Wrap(err, "Failed to update instance's volume name")
}
+
n, err = result.RowsAffected()
if err != nil {
- return errors.Wrap(err, "failed to get rows affected by container volume update")
+ return errors.Wrap(err, "Failed to get rows affected by instance volume update")
}
+
if n != int64(count) {
- return fmt.Errorf("unexpected number of updated rows in volumes table: %d", n)
+ return fmt.Errorf("Unexpected number of updated rows in volumes table: %d", n)
}
return nil
@@ -537,6 +547,7 @@ func (c *ClusterTx) ContainerNodeProjectList(project string, instanceType instan
if err != nil {
return nil, errors.Wrap(err, "Local node name")
}
+
filter := InstanceFilter{
Project: project,
Node: node,
diff --git a/lxd/db/errors.go b/lxd/db/errors.go
index 7905eac451..cdd4a2eee2 100644
--- a/lxd/db/errors.go
+++ b/lxd/db/errors.go
@@ -7,7 +7,7 @@ import (
var (
// ErrAlreadyDefined happens when the given entry already exists,
// for example a container.
- ErrAlreadyDefined = fmt.Errorf("The container/snapshot already exists")
+ ErrAlreadyDefined = fmt.Errorf("The instance/snapshot already exists")
// ErrNoSuchObject is in the case of joins (and probably other) queries,
// we don't get back sql.ErrNoRows when no rows are returned, even though we do
diff --git a/lxd/db/node.go b/lxd/db/node.go
index 54e92e342d..d9ddfd78dd 100644
--- a/lxd/db/node.go
+++ b/lxd/db/node.go
@@ -615,7 +615,7 @@ func (c *ClusterTx) NodeWithLeastContainers(archs []int) (string, error) {
pending, err := query.Count(
c.tx, "operations", "node_id=? AND type=?", node.ID, OperationContainerCreate)
if err != nil {
- return "", errors.Wrap(err, "Failed to get pending containers count")
+ return "", errors.Wrap(err, "Failed to get pending instances count")
}
count := created + pending
diff --git a/lxd/db/storage_volumes.go b/lxd/db/storage_volumes.go
index 9bb2323bf1..352080ccb2 100644
--- a/lxd/db/storage_volumes.go
+++ b/lxd/db/storage_volumes.go
@@ -230,7 +230,7 @@ func (c *Cluster) StorageVolumeIsAvailable(pool, volume string) (bool, error) {
containers, err := tx.instanceListExpanded()
if err != nil {
- return errors.Wrapf(err, "Fetch containers")
+ return errors.Wrapf(err, "Fetch instances")
}
for _, container := range containers {
diff --git a/lxd/device/disk.go b/lxd/device/disk.go
index bcbbeca2ec..4521c5a6e3 100644
--- a/lxd/device/disk.go
+++ b/lxd/device/disk.go
@@ -977,7 +977,7 @@ func (d *disk) storagePoolVolumeAttachShift(poolName string, volumeName string,
// we can shift the storage volume.
// I'm not sure if we want some locking here.
if volumeUsedBy[0] != d.inst.Name() {
- return fmt.Errorf("idmaps of container and storage volume are not identical")
+ return fmt.Errorf("Idmaps of instance and storage volume are not identical")
}
}
}
diff --git a/lxd/instance/instance_utils.go b/lxd/instance/instance_utils.go
index c9a7717a5e..66c3de075e 100644
--- a/lxd/instance/instance_utils.go
+++ b/lxd/instance/instance_utils.go
@@ -112,11 +112,11 @@ func ValidConfig(sysOS *sys.OS, config map[string]string, profile bool, expanded
for k, v := range config {
if profile && strings.HasPrefix(k, "volatile.") {
- return fmt.Errorf("Volatile keys can only be set on containers")
+ return fmt.Errorf("Volatile keys can only be set on instances")
}
if profile && strings.HasPrefix(k, "image.") {
- return fmt.Errorf("Image keys can only be set on containers")
+ return fmt.Errorf("Image keys can only be set on instances")
}
err := validConfigKey(sysOS, k, v)
@@ -442,7 +442,7 @@ func LoadInstanceDatabaseObject(tx *db.ClusterTx, project, name string) (*db.Ins
} else {
container, err = tx.InstanceGet(project, name)
if err != nil {
- return nil, errors.Wrapf(err, "Failed to fetch container %q in project %q", name, project)
+ return nil, errors.Wrapf(err, "Failed to fetch instance %q in project %q", name, project)
}
}
@@ -460,7 +460,7 @@ func LoadByProjectAndName(s *state.State, project, name string) (Instance, error
args := db.InstanceToArgs(container)
inst, err := Load(s, args, nil)
if err != nil {
- return nil, errors.Wrap(err, "Failed to load container")
+ return nil, errors.Wrap(err, "Failed to load instance")
}
return inst, nil
@@ -728,7 +728,7 @@ func BackupLoadByName(s *state.State, project, name string) (*backup.Backup, err
// Load the instance it belongs to
instance, err := LoadByID(s, args.InstanceID)
if err != nil {
- return nil, errors.Wrap(err, "Load container from database")
+ return nil, errors.Wrap(err, "Load instance from database")
}
return backup.New(s, instance, args.ID, name, args.CreationDate, args.ExpiryDate, args.InstanceOnly, args.OptimizedStorage), nil
diff --git a/lxd/maas/controller.go b/lxd/maas/controller.go
index ecc5f1d43d..07390a74db 100644
--- a/lxd/maas/controller.go
+++ b/lxd/maas/controller.go
@@ -109,7 +109,7 @@ func (c *Controller) getDevice(name string) (gomaasapi.Device, error) {
}
if len(devs) != 1 {
- return nil, fmt.Errorf("Couldn't find the specified container: %s", name)
+ return nil, fmt.Errorf("Couldn't find the specified instance: %s", name)
}
return devs[0], nil
diff --git a/lxd/main_forkexec.go b/lxd/main_forkexec.go
index 453ff75a36..f42278ea66 100644
--- a/lxd/main_forkexec.go
+++ b/lxd/main_forkexec.go
@@ -71,7 +71,7 @@ func (c *cmdForkexec) Run(cmd *cobra.Command, args []string) error {
// Load the container
d, err := liblxc.NewContainer(name, lxcpath)
if err != nil {
- return fmt.Errorf("Error initializing container for start: %q", err)
+ return fmt.Errorf("Error initializing instance for start: %q", err)
}
err = d.LoadConfigFile(configPath)
diff --git a/lxd/profiles_utils.go b/lxd/profiles_utils.go
index bef9d19324..8275cf9278 100644
--- a/lxd/profiles_utils.go
+++ b/lxd/profiles_utils.go
@@ -38,7 +38,7 @@ func doProfileUpdate(d *Daemon, project, name string, id int64, profile *api.Pro
containers, err := getProfileContainersInfo(d.cluster, project, name)
if err != nil {
- return errors.Wrapf(err, "failed to query containers associated with profile '%s'", name)
+ return errors.Wrapf(err, "Failed to query instances associated with profile '%s'", name)
}
// Check if the root device is supposed to be changed or removed.
@@ -67,7 +67,7 @@ func doProfileUpdate(d *Daemon, project, name string, id int64, profile *api.Pro
// Found the profile
if profiles[i] == name {
// If it's the current profile, then we can't modify that root device
- return fmt.Errorf("At least one container relies on this profile's root disk device")
+ return fmt.Errorf("At least one instance relies on this profile's root disk device")
} else {
// If it's not, then move on to the next container
break
@@ -170,12 +170,12 @@ func doProfileUpdateCluster(d *Daemon, project, name string, old api.ProfilePut)
return err
})
if err != nil {
- return errors.Wrap(err, "failed to query local node name")
+ return errors.Wrap(err, "Failed to query local node name")
}
containers, err := getProfileContainersInfo(d.cluster, project, name)
if err != nil {
- return errors.Wrapf(err, "failed to query containers associated with profile '%s'", name)
+ return errors.Wrapf(err, "Failed to query instances associated with profile '%s'", name)
}
failures := map[string]error{}
@@ -245,7 +245,7 @@ func getProfileContainersInfo(cluster *db.Cluster, project, profile string) ([]d
// given profile.
names, err := cluster.ProfileContainersGet(project, profile)
if err != nil {
- return nil, errors.Wrapf(err, "failed to query containers with profile '%s'", profile)
+ return nil, errors.Wrapf(err, "Failed to query instances with profile '%s'", profile)
}
containers := []db.InstanceArgs{}
@@ -264,7 +264,7 @@ func getProfileContainersInfo(cluster *db.Cluster, project, profile string) ([]d
return nil
})
if err != nil {
- return nil, errors.Wrapf(err, "Failed to fetch containers")
+ return nil, errors.Wrapf(err, "Failed to fetch instances")
}
return containers, nil
diff --git a/lxd/storage/backend_lxd.go b/lxd/storage/backend_lxd.go
index 393da41e7b..64c2ef1ae1 100644
--- a/lxd/storage/backend_lxd.go
+++ b/lxd/storage/backend_lxd.go
@@ -2518,7 +2518,7 @@ func (b *lxdBackend) UpdateCustomVolume(volName, newDesc string, newConfig map[s
}
if len(usingVolume) != 0 {
- return fmt.Errorf("Cannot modify shifting with running containers using the volume")
+ return fmt.Errorf("Cannot modify shifting with running instances using the volume")
}
}
diff --git a/lxd/storage_volumes.go b/lxd/storage_volumes.go
index 1c5a63db55..8c4b8568bd 100644
--- a/lxd/storage_volumes.go
+++ b/lxd/storage_volumes.go
@@ -594,7 +594,7 @@ func storagePoolVolumeTypePost(d *Daemon, r *http.Request, volumeTypeName string
}
if len(ctsUsingVolume) > 0 {
- return response.SmartError(fmt.Errorf("Volume is still in use by running containers"))
+ return response.SmartError(fmt.Errorf("Volume is still in use by running instances"))
}
// Detect a rename request.
diff --git a/lxd/template/chroot.go b/lxd/template/chroot.go
index 94a11e1dba..d90ca6bc76 100644
--- a/lxd/template/chroot.go
+++ b/lxd/template/chroot.go
@@ -38,7 +38,7 @@ func (l ChrootLoader) Get(path string) (io.Reader, error) {
// Validate that we're under the expected prefix
if !strings.HasPrefix(path, basePath) {
- return nil, fmt.Errorf("Attempting to access a file outside the container")
+ return nil, fmt.Errorf("Attempting to access a file outside the instance")
}
// Open and read the file
From f6ff00ab439fac0aa5a3776560d2bcb19d875cb2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?St=C3=A9phane=20Graber?= <stgraber at ubuntu.com>
Date: Sat, 7 Mar 2020 16:23:16 +0100
Subject: [PATCH 2/2] lxd: Rename container files to instance
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Signed-off-by: Stéphane Graber <stgraber at ubuntu.com>
---
lxd/{container.go => instance.go} | 0
lxd/{container_backup.go => instance_backup.go} | 0
lxd/{container_console.go => instance_console.go} | 0
lxd/{container_delete.go => instance_delete.go} | 0
lxd/{container_exec.go => instance_exec.go} | 0
lxd/{container_file.go => instance_file.go} | 0
lxd/{container_get.go => instance_get.go} | 0
lxd/{container_instance_types.go => instance_instance_types.go} | 0
lxd/{container_logs.go => instance_logs.go} | 0
lxd/{container_metadata.go => instance_metadata.go} | 0
lxd/{container_patch.go => instance_patch.go} | 0
lxd/{container_post.go => instance_post.go} | 0
lxd/{container_put.go => instance_put.go} | 0
lxd/{container_snapshot.go => instance_snapshot.go} | 0
lxd/{container_state.go => instance_state.go} | 0
lxd/{container_test.go => instance_test.go} | 0
lxd/{containers.go => instances.go} | 0
lxd/{containers_get.go => instances_get.go} | 0
lxd/{containers_post.go => instances_post.go} | 0
lxd/{migrate_container.go => migrate_instance.go} | 0
20 files changed, 0 insertions(+), 0 deletions(-)
rename lxd/{container.go => instance.go} (100%)
rename lxd/{container_backup.go => instance_backup.go} (100%)
rename lxd/{container_console.go => instance_console.go} (100%)
rename lxd/{container_delete.go => instance_delete.go} (100%)
rename lxd/{container_exec.go => instance_exec.go} (100%)
rename lxd/{container_file.go => instance_file.go} (100%)
rename lxd/{container_get.go => instance_get.go} (100%)
rename lxd/{container_instance_types.go => instance_instance_types.go} (100%)
rename lxd/{container_logs.go => instance_logs.go} (100%)
rename lxd/{container_metadata.go => instance_metadata.go} (100%)
rename lxd/{container_patch.go => instance_patch.go} (100%)
rename lxd/{container_post.go => instance_post.go} (100%)
rename lxd/{container_put.go => instance_put.go} (100%)
rename lxd/{container_snapshot.go => instance_snapshot.go} (100%)
rename lxd/{container_state.go => instance_state.go} (100%)
rename lxd/{container_test.go => instance_test.go} (100%)
rename lxd/{containers.go => instances.go} (100%)
rename lxd/{containers_get.go => instances_get.go} (100%)
rename lxd/{containers_post.go => instances_post.go} (100%)
rename lxd/{migrate_container.go => migrate_instance.go} (100%)
diff --git a/lxd/container.go b/lxd/instance.go
similarity index 100%
rename from lxd/container.go
rename to lxd/instance.go
diff --git a/lxd/container_backup.go b/lxd/instance_backup.go
similarity index 100%
rename from lxd/container_backup.go
rename to lxd/instance_backup.go
diff --git a/lxd/container_console.go b/lxd/instance_console.go
similarity index 100%
rename from lxd/container_console.go
rename to lxd/instance_console.go
diff --git a/lxd/container_delete.go b/lxd/instance_delete.go
similarity index 100%
rename from lxd/container_delete.go
rename to lxd/instance_delete.go
diff --git a/lxd/container_exec.go b/lxd/instance_exec.go
similarity index 100%
rename from lxd/container_exec.go
rename to lxd/instance_exec.go
diff --git a/lxd/container_file.go b/lxd/instance_file.go
similarity index 100%
rename from lxd/container_file.go
rename to lxd/instance_file.go
diff --git a/lxd/container_get.go b/lxd/instance_get.go
similarity index 100%
rename from lxd/container_get.go
rename to lxd/instance_get.go
diff --git a/lxd/container_instance_types.go b/lxd/instance_instance_types.go
similarity index 100%
rename from lxd/container_instance_types.go
rename to lxd/instance_instance_types.go
diff --git a/lxd/container_logs.go b/lxd/instance_logs.go
similarity index 100%
rename from lxd/container_logs.go
rename to lxd/instance_logs.go
diff --git a/lxd/container_metadata.go b/lxd/instance_metadata.go
similarity index 100%
rename from lxd/container_metadata.go
rename to lxd/instance_metadata.go
diff --git a/lxd/container_patch.go b/lxd/instance_patch.go
similarity index 100%
rename from lxd/container_patch.go
rename to lxd/instance_patch.go
diff --git a/lxd/container_post.go b/lxd/instance_post.go
similarity index 100%
rename from lxd/container_post.go
rename to lxd/instance_post.go
diff --git a/lxd/container_put.go b/lxd/instance_put.go
similarity index 100%
rename from lxd/container_put.go
rename to lxd/instance_put.go
diff --git a/lxd/container_snapshot.go b/lxd/instance_snapshot.go
similarity index 100%
rename from lxd/container_snapshot.go
rename to lxd/instance_snapshot.go
diff --git a/lxd/container_state.go b/lxd/instance_state.go
similarity index 100%
rename from lxd/container_state.go
rename to lxd/instance_state.go
diff --git a/lxd/container_test.go b/lxd/instance_test.go
similarity index 100%
rename from lxd/container_test.go
rename to lxd/instance_test.go
diff --git a/lxd/containers.go b/lxd/instances.go
similarity index 100%
rename from lxd/containers.go
rename to lxd/instances.go
diff --git a/lxd/containers_get.go b/lxd/instances_get.go
similarity index 100%
rename from lxd/containers_get.go
rename to lxd/instances_get.go
diff --git a/lxd/containers_post.go b/lxd/instances_post.go
similarity index 100%
rename from lxd/containers_post.go
rename to lxd/instances_post.go
diff --git a/lxd/migrate_container.go b/lxd/migrate_instance.go
similarity index 100%
rename from lxd/migrate_container.go
rename to lxd/migrate_instance.go
More information about the lxc-devel
mailing list