diff --git a/pkg/adaptor/cloud/libvirt/libvirt.go b/pkg/adaptor/cloud/libvirt/libvirt.go
index c7847b078..b63d41e3e 100644
--- a/pkg/adaptor/cloud/libvirt/libvirt.go
+++ b/pkg/adaptor/cloud/libvirt/libvirt.go
@@ -20,6 +20,22 @@ import (
 	libvirtxml "libvirt.org/go/libvirtxml"
 )
 
+const (
+	// architecture value for the s390x architecture
+	archS390x = "s390x"
+	// hvm indicates that the OS is one designed to run on bare metal, so requires full virtualization.
+	typeHardwareVirtualMachine = "hvm"
+)
+
+type domainConfig struct {
+	name        string
+	cpu         uint
+	mem         uint
+	networkName string
+	bootDisk    string
+	cidataDisk  string
+}
+
 func createCloudInitISO(v *vmConfig, libvirtClient *libvirtClient) string {
 	logger.Printf("Create cloudInit iso\n")
 	cloudInitIso := libvirtClient.dataDir + "/" + v.name + "-cloudinit.iso"
@@ -49,7 +65,7 @@ func createCloudInitISO(v *vmConfig, libvirtClient *libvirtClient) string {
 	}
 	udf.Close()
 
-	fmt.Printf("Executing genisoimage\n")
+	logger.Println("Executing genisoimage")
 	// genisoimage -output cloudInitIso.iso -volid cidata -joliet -rock user-data meta-data
 	cmd := exec.Command("genisoimage", "-output", cloudInitIso, "-volid", "cidata", "-joliet", "-rock", userDataFile, v.metaData)
 	cmd.Stdout = os.Stdout
@@ -95,7 +111,7 @@ func checkDomainExistsById(id uint32, libvirtClient *libvirtClient) (exist bool,
 
 func uploadIso(isoFile string, isoVolName string, libvirtClient *libvirtClient) (string, error) {
 
-	fmt.Printf("Uploading iso file: %s\n", isoFile)
+	logger.Printf("Uploading iso file: %s\n", isoFile)
 	volumeDef := newDefVolume(isoVolName)
 
 	img, err := newImage(isoFile)
@@ -116,57 +132,200 @@ func uploadIso(isoFile string, isoVolName string, libvirtClient *libvirtClient)
 }
 
-func CreateDomain(ctx context.Context, libvirtClient *libvirtClient, v *vmConfig) (result *createDomainOutput, err error) {
+func getGuestForArchType(caps *libvirtxml.Caps, arch string, ostype string) (*libvirtxml.CapsGuest, error) {
+	for _, guest := range caps.Guests {
+		if guest.Arch.Name == arch && guest.OSType == ostype {
+			return &guest, nil
+		}
+	}
+	return nil, fmt.Errorf("could not find any guests for architecture type %s/%s", ostype, arch)
+}
 
-	v.cpu = uint(2)
-	v.mem = uint(8)
-	v.rootDiskSize = uint64(10)
+// getHostCapabilities returns the host capabilities as a struct
+func getHostCapabilities(conn *libvirt.Connect) (*libvirtxml.Caps, error) {
+	capsXML, err := conn.GetCapabilities()
+	if err != nil {
+		return nil, fmt.Errorf("unable to get capabilities, cause: %w", err)
+	}
 
-	exists, err := checkDomainExistsByName(v.name, libvirtClient)
+	caps := &libvirtxml.Caps{}
+	err = xml.Unmarshal([]byte(capsXML), caps)
 	if err != nil {
-		return nil, fmt.Errorf("Error in checking instance: %s", err)
+		return nil, fmt.Errorf("unable to unmarshal capabilities, cause: %w", err)
 	}
-	if exists {
-		logger.Printf("Instance already exists ")
-		return &createDomainOutput{
-			instance: v,
-		}, nil
+
+	return caps, nil
+}
+
+// lookupMachine finds the machine name from the set of available machines
+func lookupMachine(machines []libvirtxml.CapsGuestMachine, targetmachine string) string {
+	for _, machine := range machines {
+		if machine.Name == targetmachine {
+			if machine.Canonical != "" {
+				return machine.Canonical
+			}
+			return machine.Name
+		}
 	}
+	return ""
+}
 
-	rootVolName := v.name + "-root.qcow2"
-	err = createVolume(rootVolName, v.rootDiskSize, libvirtClient.volName, libvirtClient)
+// getCanonicalMachineName returns the default (canonical) name of the guest machine based on capabilities
+// this is equivalent to doing a `virsh capabilities` and then looking at the `machine` configuration, e.g. `s390-ccw-virtio`
+func getCanonicalMachineName(caps *libvirtxml.Caps, arch string, virttype string, targetmachine string) (string, error) {
+	guest, err := getGuestForArchType(caps, arch, virttype)
 	if err != nil {
-		return nil, fmt.Errorf("Error in creating volume: %s", err)
+		return "", err
 	}
 
-	cloudInitIso := createCloudInitISO(v, libvirtClient)
+	name := lookupMachine(guest.Arch.Machines, targetmachine)
+	if name != "" {
+		return name, nil
+	}
 
-	isoVolName := v.name + "-cloudinit.iso"
-	isoVolFile, err := uploadIso(cloudInitIso, isoVolName, libvirtClient)
-	if err != nil {
-		return nil, fmt.Errorf("Error in uploading iso volume: %s", err)
+	for _, domain := range guest.Arch.Domains {
+		name := lookupMachine(domain.Machines, targetmachine)
+		if name != "" {
+			return name, nil
+		}
 	}
 
-	rootVol, err := getVolume(libvirtClient, rootVolName)
+	return "", fmt.Errorf("cannot find machine type %s for %s/%s in %v", targetmachine, virttype, arch, caps)
+}
+
+func createDomainXMLs390x(client *libvirtClient, cfg *domainConfig) (*libvirtxml.Domain, error) {
+
+	guest, err := getGuestForArchType(client.caps, archS390x, typeHardwareVirtualMachine)
 	if err != nil {
-		return nil, fmt.Errorf("Error retrieving volume: %s", err)
+		return nil, err
 	}
 
-	rootVolFile, err := rootVol.GetPath()
+	canonicalmachine, err := getCanonicalMachineName(client.caps, archS390x, typeHardwareVirtualMachine, "s390-ccw-virtio")
 	if err != nil {
-		return nil, fmt.Errorf("Error retrieving volume path: %s", err)
+		return nil, err
+	}
+
+	bootDisk := libvirtxml.DomainDisk{
+		Device: "disk",
+		Target: &libvirtxml.DomainDiskTarget{
+			Dev: "vda",
+			Bus: "virtio",
+		},
+		Driver: &libvirtxml.DomainDiskDriver{
+			Name:  "qemu",
+			Type:  "qcow2",
+			IOMMU: "on",
+		},
+		Source: &libvirtxml.DomainDiskSource{
+			File: &libvirtxml.DomainDiskSourceFile{
+				File: cfg.bootDisk,
+			},
+		},
+		Boot: &libvirtxml.DomainDeviceBoot{
+			Order: 1,
+		},
+	}
+
+	cloudInitDisk := libvirtxml.DomainDisk{
+		Device: "disk",
+		Target: &libvirtxml.DomainDiskTarget{
+			Dev: "vdb",
+			Bus: "virtio",
+		},
+		Driver: &libvirtxml.DomainDiskDriver{
+			Name:  "qemu",
+			Type:  "raw",
+			IOMMU: "on",
+		},
+		Source: &libvirtxml.DomainDiskSource{
+			File: &libvirtxml.DomainDiskSourceFile{
+				File: cfg.cidataDisk,
+			},
+		},
 	}
 
-	// Gen Domain XML.
+	return &libvirtxml.Domain{
+		Type:        "kvm",
+		Name:        cfg.name,
+		Description: "This Virtual Machine is the peer-pod VM",
+		OS: &libvirtxml.DomainOS{
+			Type: &libvirtxml.DomainOSType{
+				Type:    typeHardwareVirtualMachine,
+				Arch:    archS390x,
+				Machine: canonicalmachine,
+			},
+		},
+		Metadata: &libvirtxml.DomainMetadata{},
+		Memory: &libvirtxml.DomainMemory{
+			Value: cfg.mem, Unit: "GiB",
+		},
+		CurrentMemory: &libvirtxml.DomainCurrentMemory{
+			Value: cfg.mem, Unit: "GiB",
+		},
+		VCPU: &libvirtxml.DomainVCPU{
+			Value: cfg.cpu,
+		},
+		Clock: &libvirtxml.DomainClock{
+			Offset: "utc",
+		},
+		Devices: &libvirtxml.DomainDeviceList{
+			Disks: []libvirtxml.DomainDisk{
+				bootDisk,
+				cloudInitDisk,
+			},
+			Emulator: guest.Arch.Emulator,
+			MemBalloon: &libvirtxml.DomainMemBalloon{
+				Model: "none",
+			},
+			RNGs: []libvirtxml.DomainRNG{
+				{
+					Model: "virtio",
+					Backend: &libvirtxml.DomainRNGBackend{
+						Random: &libvirtxml.DomainRNGBackendRandom{Device: "/dev/urandom"},
+					},
+				},
+			},
+			Consoles: []libvirtxml.DomainConsole{
+				{
+					Source: &libvirtxml.DomainChardevSource{
+						Pty: &libvirtxml.DomainChardevSourcePty{},
+					},
+					Target: &libvirtxml.DomainConsoleTarget{
+						Type: "sclp",
+					},
+				},
+			},
+			Interfaces: []libvirtxml.DomainInterface{
+				{
+					Model: &libvirtxml.DomainInterfaceModel{
+						Type: "virtio",
+					},
+					Source: &libvirtxml.DomainInterfaceSource{
+						Network: &libvirtxml.DomainInterfaceSourceNetwork{
+							Network: cfg.networkName,
+						},
+					},
+					Driver: &libvirtxml.DomainInterfaceDriver{
+						IOMMU: "on",
+					},
+				},
+			},
+		},
+	}, nil
+
+}
+
+func createDomainXMLx86_64(client *libvirtClient, cfg *domainConfig) (*libvirtxml.Domain, error) {
+
 	var diskControllerAddr uint = 0
-	domCfg := &libvirtxml.Domain{
+	return &libvirtxml.Domain{
 		Type:        "kvm",
-		Name:        v.name,
+		Name:        cfg.name,
 		Description: "This Virtual Machine is the peer-pod VM",
-		Memory:      &libvirtxml.DomainMemory{Value: uint(v.mem), Unit: "GiB", DumpCore: "on"},
-		VCPU:        &libvirtxml.DomainVCPU{Value: uint(v.cpu)},
+		Memory:      &libvirtxml.DomainMemory{Value: uint(cfg.mem), Unit: "GiB", DumpCore: "on"},
+		VCPU:        &libvirtxml.DomainVCPU{Value: uint(cfg.cpu)},
 		OS: &libvirtxml.DomainOS{
-			Type: &libvirtxml.DomainOSType{Arch: "x86_64", Type: "hvm"},
+			Type: &libvirtxml.DomainOSType{Arch: "x86_64", Type: typeHardwareVirtualMachine},
 		},
 		// For Hot-Plug Feature.
 		Features: &libvirtxml.DomainFeatureList{
@@ -184,7 +343,7 @@ func CreateDomain(ctx context.Context, libvirtClient *libvirtClient, v *vmConfig
 				Driver: &libvirtxml.DomainDiskDriver{Type: "qcow2"},
 				Source: &libvirtxml.DomainDiskSource{
 					File: &libvirtxml.DomainDiskSourceFile{
-						File: rootVolFile}},
+						File: cfg.bootDisk}},
 				Target: &libvirtxml.DomainDiskTarget{
 					Dev: "sda", Bus: "sata"},
 				Boot: &libvirtxml.DomainDeviceBoot{Order: 1},
@@ -196,7 +355,7 @@ func CreateDomain(ctx context.Context, libvirtClient *libvirtClient, v *vmConfig
 				Device: "cdrom",
 				Driver: &libvirtxml.DomainDiskDriver{Name: "qemu", Type: "raw"},
 				Source: &libvirtxml.DomainDiskSource{
-					File: &libvirtxml.DomainDiskSourceFile{File: isoVolFile},
+					File: &libvirtxml.DomainDiskSourceFile{File: cfg.cidataDisk},
 				},
 				Target:   &libvirtxml.DomainDiskTarget{Dev: "hda", Bus: "ide"},
 				ReadOnly: &libvirtxml.DomainDiskReadOnly{},
@@ -208,7 +367,7 @@ func CreateDomain(ctx context.Context, libvirtClient *libvirtClient, v *vmConfig
 			// Network Interfaces.
 			Interfaces: []libvirtxml.DomainInterface{
 				{
-					Source: &libvirtxml.DomainInterfaceSource{Network: &libvirtxml.DomainInterfaceSourceNetwork{Network: libvirtClient.networkName}},
+					Source: &libvirtxml.DomainInterfaceSource{Network: &libvirtxml.DomainInterfaceSourceNetwork{Network: cfg.networkName}},
 					Model:  &libvirtxml.DomainInterfaceModel{Type: "virtio"},
 				},
 			},
@@ -219,6 +378,72 @@ func CreateDomain(ctx context.Context, libvirtClient *libvirtClient, v *vmConfig
 				},
 			},
 		},
+	}, nil
+}
+
+// createDomainXML detects the architecture of the libvirt host (from the node's CPU model) and returns a libvirt domain definition suited to it
+func createDomainXML(client *libvirtClient, cfg *domainConfig) (*libvirtxml.Domain, error) {
+	switch client.nodeInfo.Model {
+	case archS390x:
+		return createDomainXMLs390x(client, cfg)
+	default:
+		return createDomainXMLx86_64(client, cfg)
+	}
+}
+
+func CreateDomain(ctx context.Context, libvirtClient *libvirtClient, v *vmConfig) (result *createDomainOutput, err error) {
+
+	v.cpu = uint(2)
+	v.mem = uint(8)
+	v.rootDiskSize = uint64(10)
+
+	exists, err := checkDomainExistsByName(v.name, libvirtClient)
+	if err != nil {
+		return nil, fmt.Errorf("Error in checking instance: %s", err)
+	}
+	if exists {
+		logger.Printf("Instance already exists ")
+		return &createDomainOutput{
+			instance: v,
+		}, nil
+	}
+
+	rootVolName := v.name + "-root.qcow2"
+	err = createVolume(rootVolName, v.rootDiskSize, libvirtClient.volName, libvirtClient)
+	if err != nil {
+		return nil, fmt.Errorf("Error in creating volume: %s", err)
+	}
+
+	cloudInitIso := createCloudInitISO(v, libvirtClient)
+
+	isoVolName := v.name + "-cloudinit.iso"
+	isoVolFile, err := uploadIso(cloudInitIso, isoVolName, libvirtClient)
+	if err != nil {
+		return nil, fmt.Errorf("Error in uploading iso volume: %s", err)
+	}
+
+	rootVol, err := getVolume(libvirtClient, rootVolName)
+	if err != nil {
+		return nil, fmt.Errorf("Error retrieving volume: %s", err)
+	}
+
+	rootVolFile, err := rootVol.GetPath()
+	if err != nil {
+		return nil, fmt.Errorf("Error retrieving volume path: %s", err)
+	}
+
+	domainCfg := domainConfig{
+		name:        v.name,
+		cpu:         v.cpu,
+		mem:         v.mem,
+		networkName: libvirtClient.networkName,
+		bootDisk:    rootVolFile,
+		cidataDisk:  isoVolFile,
+	}
+
+	domCfg, err := createDomainXML(libvirtClient, &domainCfg)
+	if err != nil {
+		return nil, fmt.Errorf("error building the libvirt XML, cause: %w", err)
 	}
 
 	logger.Printf("Create XML for '%s'", v.name)
@@ -370,7 +595,17 @@ func NewLibvirtClient(libvirtCfg Config) (*libvirtClient, error) {
 		return nil, fmt.Errorf("can't find storage pool %q: %v", libvirtCfg.PoolName, err)
 	}
 
-	fmt.Printf("Created libvirt connection")
+	node, err := conn.GetNodeInfo()
+	if err != nil {
+		return nil, fmt.Errorf("error retrieving node info: %w", err)
+	}
+
+	caps, err := getHostCapabilities(conn)
+	if err != nil {
+		return nil, err
+	}
+
+	logger.Println("Created libvirt connection")
 
 	return &libvirtClient{
 		connection: conn,
@@ -379,6 +614,8 @@ func NewLibvirtClient(libvirtCfg Config) (*libvirtClient, error) {
 		networkName: libvirtCfg.NetworkName,
 		dataDir:     libvirtCfg.DataDir,
 		volName:     libvirtCfg.VolName,
+		nodeInfo:    node,
+		caps:        caps,
 	}, nil
 }
 
diff --git a/pkg/adaptor/cloud/libvirt/libvirt_test.go b/pkg/adaptor/cloud/libvirt/libvirt_test.go
new file mode 100644
index 000000000..bf9321b01
--- /dev/null
+++ b/pkg/adaptor/cloud/libvirt/libvirt_test.go
@@ -0,0 +1,116 @@
+// (C) Copyright Confidential Containers Contributors
+// SPDX-License-Identifier: Apache-2.0
+
+package libvirt
+
+import (
+	"fmt"
+	"testing"
+
"github.com/confidential-containers/cloud-api-adaptor/pkg/adaptor/cloud" + "github.com/stretchr/testify/assert" + libvirtxml "libvirt.org/go/libvirtxml" +) + +var testCfg Config + +func init() { + cloud.DefaultToEnv(&testCfg.URI, "LIBVIRT_URI", "") // explicitly no fallback here + cloud.DefaultToEnv(&testCfg.PoolName, "LIBVIRT_POOL", defaultPoolName) + cloud.DefaultToEnv(&testCfg.NetworkName, "LIBVIRT_NET", defaultNetworkName) + cloud.DefaultToEnv(&testCfg.VolName, "LIBVIRT_VOL_NAME", defaultVolName) +} + +func checkConfig(t *testing.T) { + if testCfg.URI == "" { + t.Skipf("Skipping because LIBVIRT_URI is not configured") + } +} + +func TestLibvirtConnection(t *testing.T) { + checkConfig(t) + + client, err := NewLibvirtClient(testCfg) + if err != nil { + t.Error(err) + } + defer client.connection.Close() + + assert.NotNil(t, client.nodeInfo) + assert.NotNil(t, client.caps) +} + +func TestGetArchitecture(t *testing.T) { + checkConfig(t) + + client, err := NewLibvirtClient(testCfg) + if err != nil { + t.Error(err) + } + defer client.connection.Close() + + node, err := client.connection.GetNodeInfo() + if err != nil { + t.Error(err) + } + + arch := node.Model + if arch == "" { + t.FailNow() + } +} + +func verifyDomainXML(domXML *libvirtxml.Domain) error { + arch := domXML.OS.Type.Arch + if arch != archS390x { + return nil + } + // verify we have iommu on the disks + for i, disk := range domXML.Devices.Disks { + if disk.Driver.IOMMU != "on" { + return fmt.Errorf("disk [%d] does not have IOMMU assigned", i) + } + } + // verify we have iommu on the networks + for i, iface := range domXML.Devices.Interfaces { + if iface.Driver.IOMMU != "on" { + return fmt.Errorf("interface [%d] does not have IOMMU assigned", i) + } + } + return nil +} + +func TestCreateDomainXMLs390x(t *testing.T) { + checkConfig(t) + + client, err := NewLibvirtClient(testCfg) + if err != nil { + t.Error(err) + } + defer client.connection.Close() + + domainCfg := domainConfig{ + name: "TestCreateDomainS390x", + cpu: 2, + mem: 2, + networkName: client.networkName, + bootDisk: "/var/lib/libvirt/images/root.qcow2", + cidataDisk: "/var/lib/libvirt/images/cidata.iso", + } + + domCfg, err := createDomainXML(client, &domainCfg) + if err != nil { + t.Error(err) + } + + arch := domCfg.OS.Type.Arch + if domCfg.OS.Type.Arch != archS390x { + t.Skipf("Skipping because architecture is [%s] and not [%s].", arch, archS390x) + } + + // verify the config + err = verifyDomainXML(domCfg) + if err != nil { + t.Error(err) + } +} diff --git a/pkg/adaptor/cloud/libvirt/types.go b/pkg/adaptor/cloud/libvirt/types.go index 65f9f6867..79f53c10d 100644 --- a/pkg/adaptor/cloud/libvirt/types.go +++ b/pkg/adaptor/cloud/libvirt/types.go @@ -9,6 +9,7 @@ import ( "net/netip" libvirt "libvirt.org/go/libvirt" + libvirtxml "libvirt.org/go/libvirtxml" ) type Config struct { @@ -48,4 +49,10 @@ type libvirtClient struct { dataDir string volName string + + // information about the target node + nodeInfo *libvirt.NodeInfo + + // host capabilities + caps *libvirtxml.Caps }