[lxc-devel] [lxd/master] lxd/vm: Centralize port generation
stgraber on Github
lxc-bot at linuxcontainers.org
Thu Jun 11 00:11:34 UTC 2020
A non-text attachment was scrubbed...
Name: not available
Type: text/x-mailbox
Size: 572 bytes
Desc: not available
URL: <http://lists.linuxcontainers.org/pipermail/lxc-devel/attachments/20200610/e2e21e69/attachment-0001.bin>
-------------- next part --------------
From c7ed2423d53bf672a91407bce6ccf141f3098cae Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?St=C3=A9phane=20Graber?= <stgraber at ubuntu.com>
Date: Wed, 10 Jun 2020 20:01:07 -0400
Subject: [PATCH] lxd/vm: Centralize port generation
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
This adds a new helper function within the config generation logic that
automatically figures out the best PCI config to minimize the use of
bridge ports and devices by using multi-function devices when possible.
Signed-off-by: Stéphane Graber <stgraber at ubuntu.com>
---
lxd/instance/drivers/driver_qemu.go | 281 +++++++++++++-----
lxd/instance/drivers/driver_qemu_templates.go | 256 +++++++---------
2 files changed, 317 insertions(+), 220 deletions(-)
diff --git a/lxd/instance/drivers/driver_qemu.go b/lxd/instance/drivers/driver_qemu.go
index 44f9ee0987..e6de385c14 100644
--- a/lxd/instance/drivers/driver_qemu.go
+++ b/lxd/instance/drivers/driver_qemu.go
@@ -1559,98 +1559,224 @@ func (vm *qemu) generateQemuConfigFile(bus string, devConfs []*deviceConfig.RunC
return "", err
}
- // Now add the dynamic parts of the config.
- err = qemuSerial.Execute(sb, map[string]interface{}{
- "architecture": vm.architectureName,
- "ringbufSizeBytes": qmp.RingbufSize,
- })
+ err = vm.addCPUConfig(sb)
if err != nil {
return "", err
}
- err = qemuSCSI.Execute(sb, map[string]interface{}{
- "architecture": vm.architectureName,
- })
+ err = vm.addMemoryConfig(sb)
if err != nil {
return "", err
}
- err = qemuBalloon.Execute(sb, map[string]interface{}{
+ err = qemuDriveFirmware.Execute(sb, map[string]interface{}{
"architecture": vm.architectureName,
+ "roPath": filepath.Join(vm.ovmfPath(), "OVMF_CODE.fd"),
+ "nvramPath": vm.nvramPath(),
+ })
+
+ err = qemuControlSocket.Execute(sb, map[string]interface{}{
+ "path": vm.monitorPath(),
})
if err != nil {
return "", err
}
- err = qemuRNG.Execute(sb, map[string]interface{}{
- "architecture": vm.architectureName,
+ // getBusAddr is used to create any needed root ports and to provide
+ // the bus and address that should be used by a device. It supports
+ // automatically setting up multi-function devices and optimizing their use.
+ portNum := 0
+ devNum := 5
+
+ type entry struct {
+ bridgeDev int // Device number on the root bridge.
+ bridgeFn int // Function number on the root bridge.
+
+ dev string // Existing device name.
+ fn int // Function number on the existing device.
+ }
+ entries := map[string]*entry{}
+
+ var rootPort *entry
+ getRootPort := func() *entry {
+ if rootPort == nil {
+ rootPort = &entry{
+ bridgeDev: devNum,
+ }
+ devNum++
+ } else {
+ if rootPort.bridgeFn == 7 {
+ rootPort.bridgeFn = 0
+ rootPort.bridgeDev = devNum
+ devNum++
+ } else {
+ rootPort.bridgeFn++
+ }
+ }
+
+ return rootPort
+ }
+
+ getBusAddr := func(group string) (string, string, bool) {
+ // FIXME: Need to figure out if ccw needs any bus logic.
+ if bus == "ccw" {
+ return "", "", false
+ }
+
+ // Find a device group if specified.
+ var p *entry
+ if group != "" {
+ var ok bool
+ p, ok = entries[group]
+ if ok {
+ // Check if group is full.
+ if p.fn == 7 {
+ p.fn = 0
+ if bus == "pci" {
+ p.bridgeDev = devNum
+ devNum++
+ } else if bus == "pcie" {
+ r := getRootPort()
+ p.bridgeDev = r.bridgeDev
+ p.bridgeFn = r.bridgeFn
+ }
+ } else {
+ p.fn++
+ }
+ } else {
+ // Create a new group.
+ p = &entry{}
+
+ if bus == "pci" {
+ p.bridgeDev = devNum
+ devNum++
+ } else if bus == "pcie" {
+ r := getRootPort()
+ p.bridgeDev = r.bridgeDev
+ p.bridgeFn = r.bridgeFn
+ }
+
+ entries[group] = p
+ }
+ } else {
+ // Create a new temporary group.
+ p = &entry{}
+
+ if bus == "pci" {
+ p.bridgeDev = devNum
+ devNum++
+ } else if bus == "pcie" {
+ r := getRootPort()
+ p.bridgeDev = r.bridgeDev
+ p.bridgeFn = r.bridgeFn
+ }
+ }
+
+ multi := p.fn == 0 && group != ""
+
+ if bus == "pci" {
+ return "pci.0", fmt.Sprintf("%x.%d", p.bridgeDev, p.fn), multi
+ }
+
+ if bus == "pcie" {
+ if p.fn == 0 {
+ qemuPCIe.Execute(sb, map[string]interface{}{
+ "index": portNum,
+ "addr": fmt.Sprintf("%x.%d", p.bridgeDev, p.bridgeFn),
+ "multifunction": p.bridgeFn == 0,
+ })
+ p.dev = fmt.Sprintf("qemu_pcie%d", portNum)
+ portNum++
+ }
+
+ return p.dev, fmt.Sprintf("00.%d", p.fn), multi
+ }
+
+ return "", "", false
+ }
+
+ // Now add the fixed set of devices.
+ devBus, devAddr, multi := getBusAddr("generic")
+ err = qemuBalloon.Execute(sb, map[string]interface{}{
+ "bus": bus,
+ "devBus": devBus,
+ "devAddr": devAddr,
+ "multifunction": multi,
})
if err != nil {
return "", err
}
- err = vm.addMemoryConfig(sb)
+ devBus, devAddr, multi = getBusAddr("generic")
+ err = qemuRNG.Execute(sb, map[string]interface{}{
+ "bus": bus,
+ "devBus": devBus,
+ "devAddr": devAddr,
+ "multifunction": multi,
+ })
if err != nil {
return "", err
}
- err = vm.addCPUConfig(sb)
+ devBus, devAddr, multi = getBusAddr("generic")
+ err = qemuVsock.Execute(sb, map[string]interface{}{
+ "bus": bus,
+ "devBus": devBus,
+ "devAddr": devAddr,
+ "vsockID": vm.vsockID(),
+ "multifunction": multi,
+ })
if err != nil {
return "", err
}
- err = qemuDriveFirmware.Execute(sb, map[string]interface{}{
- "architecture": vm.architectureName,
- "roPath": filepath.Join(vm.ovmfPath(), "OVMF_CODE.fd"),
- "nvramPath": vm.nvramPath(),
- })
-
- err = qemuVsock.Execute(sb, map[string]interface{}{
- "architecture": vm.architectureName,
- "vsockID": vm.vsockID(),
+ devBus, devAddr, multi = getBusAddr("generic")
+ err = qemuSerial.Execute(sb, map[string]interface{}{
+ "bus": bus,
+ "devBus": devBus,
+ "devAddr": devAddr,
+ "ringbufSizeBytes": qmp.RingbufSize,
+ "multifunction": multi,
})
if err != nil {
return "", err
}
- err = qemuControlSocket.Execute(sb, map[string]interface{}{
- "architecture": vm.architectureName,
- "path": vm.monitorPath(),
+ devBus, devAddr, multi = getBusAddr("")
+ err = qemuSCSI.Execute(sb, map[string]interface{}{
+ "bus": bus,
+ "devBus": devBus,
+ "devAddr": devAddr,
+ "multifunction": multi,
})
if err != nil {
return "", err
}
- // Indexes used for PCIe address generation (each device type group is assigned their own PCIe address
- // prefix in the templates). Each PCIe device is added as a multifunction device allowing up to 8 devices
- // of each type to be added.
- nicIndex := 0
- diskIndex := 0
- gpuIndex := 0
- chassisIndex := 5 // Internal devices defined in the templates use indexes 1-4.
-
+ devBus, devAddr, multi = getBusAddr("9p")
err = qemuDriveConfig.Execute(sb, map[string]interface{}{
- "architecture": vm.architectureName,
- "path": filepath.Join(vm.Path(), "config"),
- "diskIndex": diskIndex,
+ "bus": bus,
+ "devBus": devBus,
+ "devAddr": devAddr,
+ "path": filepath.Join(vm.Path(), "config"),
+ "multifunction": multi,
})
if err != nil {
return "", err
}
- diskIndex++ // The config drive is a 9p device which uses a PCIe function so increment index.
- // GPU for console.
+ devBus, devAddr, multi = getBusAddr("")
err = qemuVGA.Execute(sb, map[string]interface{}{
- "architecture": vm.architectureName,
- "chassisIndex": chassisIndex,
- "gpuIndex": gpuIndex,
+ "bus": bus,
+ "devBus": devBus,
+ "devAddr": devAddr,
+ "multifunction": multi,
})
if err != nil {
return "", err
}
- gpuIndex++ // The built in GPU device uses a PCIe function so increment index.
- chassisIndex++ // The built in GPU device uses a root port so increment index.
+ // Dynamic devices.
bootIndexes, err := vm.deviceBootPriorities()
if err != nil {
return "", errors.Wrap(err, "Error calculating boot indexes")
@@ -1658,7 +1784,6 @@ func (vm *qemu) generateQemuConfigFile(bus string, devConfs []*deviceConfig.RunC
// Record the mounts we are going to do inside the VM using the agent.
agentMounts := []instancetype.VMAgentMount{}
-
for _, runConf := range devConfs {
// Add drive devices.
if len(runConf.Mounts) > 0 {
@@ -1666,8 +1791,7 @@ func (vm *qemu) generateQemuConfigFile(bus string, devConfs []*deviceConfig.RunC
if drive.TargetPath == "/" {
err = vm.addRootDriveConfig(sb, bootIndexes, drive)
} else if drive.FSType == "9p" {
- err = vm.addDriveDirConfig(sb, diskIndex, fdFiles, &agentMounts, drive)
- diskIndex++ // 9p devices use a PCIe function so increment index.
+ err = vm.addDriveDirConfig(sb, bus, getBusAddr, fdFiles, &agentMounts, drive)
} else {
err = vm.addDriveConfig(sb, bootIndexes, drive)
}
@@ -1679,14 +1803,10 @@ func (vm *qemu) generateQemuConfigFile(bus string, devConfs []*deviceConfig.RunC
// Add network device.
if len(runConf.NetworkInterface) > 0 {
- err = vm.addNetDevConfig(sb, chassisIndex, nicIndex, bootIndexes, runConf.NetworkInterface, fdFiles)
+ err = vm.addNetDevConfig(sb, bus, getBusAddr, bootIndexes, runConf.NetworkInterface, fdFiles)
if err != nil {
return "", err
}
-
- // NIC devices use a PCIe function so increment indexes.
- nicIndex++
- chassisIndex++
}
}
@@ -1840,7 +1960,7 @@ func (vm *qemu) addRootDriveConfig(sb *strings.Builder, bootIndexes map[string]i
}
// addDriveDirConfig adds the qemu config required for adding a supplementary drive directory share.
-func (vm *qemu) addDriveDirConfig(sb *strings.Builder, diskIndex int, fdFiles *[]string, agentMounts *[]instancetype.VMAgentMount, driveConf deviceConfig.MountEntryItem) error {
+func (vm *qemu) addDriveDirConfig(sb *strings.Builder, bus string, getBusAddr func(group string) (string, string, bool), fdFiles *[]string, agentMounts *[]instancetype.VMAgentMount, driveConf deviceConfig.MountEntryItem) error {
mountTag := fmt.Sprintf("lxd_%s", driveConf.DevName)
agentMount := instancetype.VMAgentMount{
@@ -1858,27 +1978,33 @@ func (vm *qemu) addDriveDirConfig(sb *strings.Builder, diskIndex int, fdFiles *[
// Record the 9p mount for the agent.
*agentMounts = append(*agentMounts, agentMount)
+ devBus, devAddr, multi := getBusAddr("9p")
+
// For read only shares, do not use proxy.
if shared.StringInSlice("ro", driveConf.Opts) {
return qemuDriveDir.Execute(sb, map[string]interface{}{
- "architecture": vm.architectureName,
- "devName": driveConf.DevName,
- "mountTag": mountTag,
- "path": driveConf.DevPath,
- "readonly": true,
- "diskIndex": diskIndex,
+ "bus": bus,
+ "devBus": devBus,
+ "devAddr": devAddr,
+ "devName": driveConf.DevName,
+ "mountTag": mountTag,
+ "path": driveConf.DevPath,
+ "readonly": true,
+ "multifunction": multi,
})
}
// Only use proxy for writable shares.
proxyFD := vm.addFileDescriptor(fdFiles, driveConf.DevPath)
return qemuDriveDir.Execute(sb, map[string]interface{}{
- "architecture": vm.architectureName,
- "devName": driveConf.DevName,
- "mountTag": mountTag,
- "proxyFD": proxyFD,
- "readonly": false,
- "diskIndex": diskIndex,
+ "bus": bus,
+ "devBus": devBus,
+ "devAddr": devAddr,
+ "devName": driveConf.DevName,
+ "mountTag": mountTag,
+ "proxyFD": proxyFD,
+ "readonly": false,
+ "multifunction": multi,
})
}
@@ -1911,17 +2037,16 @@ func (vm *qemu) addDriveConfig(sb *strings.Builder, bootIndexes map[string]int,
}
return qemuDrive.Execute(sb, map[string]interface{}{
- "architecture": vm.architectureName,
- "devName": driveConf.DevName,
- "devPath": driveConf.DevPath,
- "bootIndex": bootIndexes[driveConf.DevName],
- "cacheMode": cacheMode,
- "aioMode": aioMode,
+ "devName": driveConf.DevName,
+ "devPath": driveConf.DevPath,
+ "bootIndex": bootIndexes[driveConf.DevName],
+ "cacheMode": cacheMode,
+ "aioMode": aioMode,
})
}
// addNetDevConfig adds the qemu config required for adding a network device.
-func (vm *qemu) addNetDevConfig(sb *strings.Builder, chassisIndex, nicIndex int, bootIndexes map[string]int, nicConfig []deviceConfig.RunConfigItem, fdFiles *[]string) error {
+func (vm *qemu) addNetDevConfig(sb *strings.Builder, bus string, getBusAddr func(group string) (string, string, bool), bootIndexes map[string]int, nicConfig []deviceConfig.RunConfigItem, fdFiles *[]string) error {
var devName, nicName, devHwaddr, pciSlotName string
for _, nicItem := range nicConfig {
if nicItem.Key == "devName" {
@@ -1937,12 +2062,10 @@ func (vm *qemu) addNetDevConfig(sb *strings.Builder, chassisIndex, nicIndex int,
var tpl *template.Template
tplFields := map[string]interface{}{
- "architecture": vm.architectureName,
- "devName": devName,
- "devHwaddr": devHwaddr,
- "bootIndex": bootIndexes[devName],
- "nicIndex": nicIndex,
- "chassisIndex": chassisIndex,
+ "bus": bus,
+ "devName": devName,
+ "devHwaddr": devHwaddr,
+ "bootIndex": bootIndexes[devName],
}
// Detect MACVTAP interface types and figure out which tap device is being used.
@@ -1971,6 +2094,10 @@ func (vm *qemu) addNetDevConfig(sb *strings.Builder, chassisIndex, nicIndex int,
tpl = qemuNetdevPhysical
}
+ devBus, devAddr, multi := getBusAddr("")
+ tplFields["devBus"] = devBus
+ tplFields["devAddr"] = devAddr
+ tplFields["multifunction"] = multi
if tpl != nil {
return tpl.Execute(sb, tplFields)
}
diff --git a/lxd/instance/drivers/driver_qemu_templates.go b/lxd/instance/drivers/driver_qemu_templates.go
index cb1cc10d69..512087812e 100644
--- a/lxd/instance/drivers/driver_qemu_templates.go
+++ b/lxd/instance/drivers/driver_qemu_templates.go
@@ -60,68 +60,72 @@ size = "{{.memSizeBytes}}B"
var qemuSerial = template.Must(template.New("qemuSerial").Parse(`
# LXD serial identifier
-[device]
-driver = "virtio-serial"
-
-[device]
-driver = "virtserialport"
-name = "org.linuxcontainers.lxd"
-chardev = "vserial"
+[device "dev-qemu_serial"]
+{{- if eq .bus "pci" "pcie"}}
+driver = "virtio-serial-pci"
+bus = "{{.devBus}}"
+addr = "{{.devAddr}}"
+{{end -}}
+{{if eq .bus "ccw" -}}
+driver = "virtio-scsi-ccw"
+{{end -}}
+{{if .multifunction -}}
+multifunction = "on"
+{{- end }}
-[chardev "vserial"]
+[chardev "qemu_serial-chardev"]
backend = "ringbuf"
size = "{{.ringbufSizeBytes}}B"
+
+[device "qemu_serial"]
+driver = "virtserialport"
+name = "org.linuxcontainers.lxd"
+chardev = "qemu_serial-chardev"
+bus = "dev-qemu_serial.0"
`))
-var qemuSCSI = template.Must(template.New("qemuSCSI").Parse(`
-# SCSI controller
-{{- if eq .architecture "x86_64" "aarch64" }}
-[device "qemu_pcie1"]
+var qemuPCIe = template.Must(template.New("qemuPCIe").Parse(`
+[device "qemu_pcie{{.index}}"]
driver = "pcie-root-port"
-port = "0x10"
-chassis = "1"
bus = "pcie.0"
+addr = "{{.addr}}"
+port = "{{.index}}"
+chassis = "{{.index}}"
+{{if .multifunction -}}
multifunction = "on"
-addr = "0x2"
{{- end }}
+`))
+var qemuSCSI = template.Must(template.New("qemuSCSI").Parse(`
+# SCSI controller
[device "qemu_scsi"]
-{{- if ne .architecture "s390x"}}
+{{- if eq .bus "pci" "pcie"}}
driver = "virtio-scsi-pci"
-{{- if eq .architecture "ppc64le" }}
-bus = "pci.0"
-{{- else}}
-bus = "qemu_pcie1"
-addr = "0x0"
-{{- end}}
-{{- else}}
+bus = "{{.devBus}}"
+addr = "{{.devAddr}}"
+{{end -}}
+{{if eq .bus "ccw" -}}
driver = "virtio-scsi-ccw"
-{{- end}}
+{{end -}}
+{{if .multifunction -}}
+multifunction = "on"
+{{- end }}
`))
var qemuBalloon = template.Must(template.New("qemuBalloon").Parse(`
# Balloon driver
-{{- if eq .architecture "x86_64" "aarch64" }}
-[device "qemu_pcie2"]
-driver = "pcie-root-port"
-port = "0x11"
-chassis = "2"
-bus = "pcie.0"
-addr = "0x2.0x1"
-{{- end }}
-
[device "qemu_ballon"]
-{{- if ne .architecture "s390x"}}
+{{- if eq .bus "pci" "pcie"}}
driver = "virtio-balloon-pci"
-{{- if eq .architecture "ppc64le" }}
-bus = "pci.0"
-{{- else}}
-bus = "qemu_pcie2"
-addr = "0x0"
+bus = "{{.devBus}}"
+addr = "{{.devAddr}}"
{{- end}}
-{{- else}}
+{{if eq .bus "ccw" -}}
driver = "virtio-balloon-ccw"
{{- end}}
+{{if .multifunction -}}
+multifunction = "on"
+{{- end }}
`))
var qemuRNG = template.Must(template.New("qemuRNG").Parse(`
@@ -130,80 +134,52 @@ var qemuRNG = template.Must(template.New("qemuRNG").Parse(`
qom-type = "rng-random"
filename = "/dev/urandom"
-{{if eq .architecture "x86_64" "aarch64" -}}
-[device "qemu_pcie3"]
-driver = "pcie-root-port"
-port = "0x12"
-chassis = "3"
-bus = "pcie.0"
-addr = "0x2.0x2"
-{{- end }}
-
[device "dev-qemu_rng"]
-rng = "qemu_rng"
-{{if ne .architecture "s390x" -}}
+{{- if eq .bus "pci" "pcie"}}
driver = "virtio-rng-pci"
-{{- if eq .architecture "ppc64le"}}
-bus = "pci.0"
-{{- else}}
-bus = "qemu_pcie3"
-addr = "0x0"
+bus = "{{.devBus}}"
+addr = "{{.devAddr}}"
{{- end}}
-{{- else}}
+{{if eq .bus "ccw" -}}
driver = "virtio-rng-ccw"
{{- end}}
+rng = "qemu_rng"
+{{if .multifunction -}}
+multifunction = "on"
+{{- end }}
`))
var qemuVsock = template.Must(template.New("qemuVsock").Parse(`
# Vsock
-{{if eq .architecture "x86_64" "aarch64" -}}
-[device "qemu_pcie4"]
-driver = "pcie-root-port"
-port = "0x13"
-chassis = "4"
-bus = "pcie.0"
-addr = "0x2.0x3"
-{{- end }}
-
-[device]
-guest-cid = "{{.vsockID}}"
-{{if ne .architecture "s390x" -}}
+[device "qemu_vsock"]
+{{- if eq .bus "pci" "pcie"}}
driver = "vhost-vsock-pci"
-{{if eq .architecture "ppc64le" -}}
-bus = "pci.0"
-{{else -}}
-bus = "qemu_pcie4"
-addr = "0x0"
-{{end -}}
-{{- else}}
+bus = "{{.devBus}}"
+addr = "{{.devAddr}}"
+{{- end}}
+{{if eq .bus "ccw" -}}
driver = "vhost-vsock-ccw"
{{- end}}
+guest-cid = "{{.vsockID}}"
+{{if .multifunction -}}
+multifunction = "on"
+{{- end }}
`))
var qemuVGA = template.Must(template.New("qemuVGA").Parse(`
# VGA
-{{if eq .architecture "x86_64" "aarch64" -}}
-[device "qemu_pcie{{.chassisIndex}}"]
-driver = "pcie-root-port"
-port = "0x{{.gpuIndex}}"
-chassis = "{{.chassisIndex}}"
-bus = "pcie.0"
-addr = "0x5.0x{{.gpuIndex}}"
-multifunction = "on"
-{{- end }}
-
-[device "dev-qemu_vga"]
-{{if ne .architecture "s390x" -}}
+[device "qemu_vga"]
+{{- if eq .bus "pci" "pcie"}}
driver = "virtio-vga"
-{{- if eq .architecture "ppc64le"}}
-bus = "pci.0"
-{{- else}}
-bus = "qemu_pcie{{.chassisIndex}}"
-addr = "0x0"
+bus = "{{.devBus}}"
+addr = "{{.devAddr}}"
{{- end}}
-{{- else}}
+{{if eq .bus "ccw" -}}
driver = "virtio-gpu-ccw"
{{- end}}
+{{if .multifunction -}}
+multifunction = "on"
+{{- end }}
`))
var qemuCPU = template.Must(template.New("qemuCPU").Parse(`
@@ -275,15 +251,19 @@ readonly = "on"
path = "{{.path}}"
[device "dev-qemu_config"]
-fsdev = "qemu_config"
-mount_tag = "config"
-{{if ne .architecture "s390x" -}}
+{{- if eq .bus "pci" "pcie"}}
driver = "virtio-9p-pci"
-multifunction = "on"
-addr = "0x3.0x{{.diskIndex}}"
-{{- else}}
+bus = "{{.devBus}}"
+addr = "{{.devAddr}}"
+{{- end}}
+{{if eq .bus "ccw" -}}
driver = "virtio-9p-ccw"
{{- end}}
+mount_tag = "config"
+fsdev = "qemu_config"
+{{if .multifunction -}}
+multifunction = "on"
+{{- end }}
`))
// Devices use "lxd_" prefix indicating that this is a user named device.
@@ -303,15 +283,19 @@ sock_fd = "{{.proxyFD}}"
{{- end}}
[device "dev-lxd_{{.devName}}"]
-fsdev = "lxd_{{.devName}}"
-mount_tag = "{{.mountTag}}"
-{{if ne .architecture "s390x" -}}
+{{- if eq .bus "pci" "pcie"}}
driver = "virtio-9p-pci"
-multifunction = "on"
-addr = "0x3.0x{{.diskIndex}}"
-{{- else}}
+bus = "{{.devBus}}"
+addr = "{{.devAddr}}"
+{{- end}}
+{{if eq .bus "ccw" -}}
driver = "virtio-9p-ccw"
{{- end}}
+fsdev = "lxd_{{.devName}}"
+mount_tag = "{{.mountTag}}"
+{{if .multifunction -}}
+multifunction = "on"
+{{- end }}
`))
// Devices use "lxd_" prefix indicating that this is a user named device.
@@ -335,36 +319,29 @@ scsi-id = "{{.bootIndex}}"
lun = "1"
drive = "lxd_{{.devName}}"
bootindex = "{{.bootIndex}}"
+{{if .multifunction -}}
+multifunction = "on"
+{{- end }}
`))
// qemuDevTapCommon is common PCI device template for tap based netdevs.
// Use 0x4.0x as the PCIe address prefix for nic devices to allow up to 8 devices of this type.
var qemuDevTapCommon = template.Must(template.New("qemuDevTapCommon").Parse(`
-{{if eq .architecture "x86_64" "aarch64" -}}
-[device "qemu_pcie{{.chassisIndex}}"]
-driver = "pcie-root-port"
-port = "0x{{.nicIndex}}"
-chassis = "{{.chassisIndex}}"
-bus = "pcie.0"
-addr = "0x4.0x{{.nicIndex}}"
-multifunction = "on"
-{{- end }}
-
[device "dev-lxd_{{.devName}}"]
-netdev = "lxd_{{.devName}}"
-mac = "{{.devHwaddr}}"
-{{if ne .architecture "s390x" -}}
+{{- if eq .bus "pci" "pcie"}}
driver = "virtio-net-pci"
-{{if eq .architecture "ppc64le" -}}
-bus = "pci.0"
-{{else -}}
-bus = "qemu_pcie{{.chassisIndex}}"
-addr = "0x0"
-{{end -}}
-{{- else}}
+bus = "{{.devBus}}"
+addr = "{{.devAddr}}"
+{{- end}}
+{{if eq .bus "ccw" -}}
driver = "virtio-net-ccw"
{{- end}}
+netdev = "lxd_{{.devName}}"
+mac = "{{.devHwaddr}}"
bootindex = "{{.bootIndex}}"
+{{if .multifunction -}}
+multifunction = "on"
+{{- end }}
`))
// Devices use "lxd_" prefix indicating that this is a user named device.
@@ -391,26 +368,19 @@ fd = "{{.tapFD}}"
// Devices use "lxd_" prefix indicating that this is a user named device.
var qemuNetdevPhysical = template.Must(template.New("qemuNetdevPhysical").Parse(`
-{{if eq .architecture "x86_64" "aarch64" -}}
-[device "qemu_pcie{{.chassisIndex}}"]
-driver = "pcie-root-port"
-port = "0x{{.nicIndex}}"
-chassis = "{{.chassisIndex}}"
-bus = "pcie.0"
-addr = "0x4.0x{{.nicIndex}}"
-multifunction = "on"
-{{- end }}
-
# Network card ("{{.devName}}" device)
[device "dev-lxd_{{.devName}}"]
+{{- if eq .bus "pci" "pcie"}}
driver = "vfio-pci"
+bus = "{{.devBus}}"
+addr = "{{.devAddr}}"
+{{- end}}
+{{if eq .bus "ccw" -}}
+driver = "vfio-ccw"
+{{- end}}
host = "{{.pciSlotName}}"
bootindex = "{{.bootIndex}}"
-{{if eq .architecture "ppc64le" -}}
-bus = "pci.0"
-{{else -}}
-bus = "qemu_pcie{{.chassisIndex}}"
-addr = "0x0"
-{{end -}}
-
+{{if .multifunction -}}
+multifunction = "on"
+{{- end }}
`))
More information about the lxc-devel
mailing list