diff --git a/go.mod b/go.mod
index a59faa25..1b27e5bd 100644
--- a/go.mod
+++ b/go.mod
@@ -5,7 +5,7 @@ go 1.21
 toolchain go1.21.0

 require (
-  github.com/Telmate/proxmox-api-go v0.0.0-20241121215203-222002bcb5bc
+  github.com/Telmate/proxmox-api-go v0.0.0-20241205214358-976ef5098918
   github.com/google/uuid v1.6.0
   github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320
   github.com/hashicorp/terraform-plugin-sdk/v2 v2.34.0
diff --git a/go.sum b/go.sum
index 475bf12f..4933ca7e 100644
--- a/go.sum
+++ b/go.sum
@@ -4,8 +4,8 @@ github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migc
 github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
 github.com/ProtonMail/go-crypto v1.1.0-alpha.2-proton h1:HKz85FwoXx86kVtTvFke7rgHvq/HoloSUvW5semjFWs=
 github.com/ProtonMail/go-crypto v1.1.0-alpha.2-proton/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE=
-github.com/Telmate/proxmox-api-go v0.0.0-20241121215203-222002bcb5bc h1:gKm4H2rehb9iTWh4OW/Yt/SNJwoTLHTam+Trjkz2HHY=
-github.com/Telmate/proxmox-api-go v0.0.0-20241121215203-222002bcb5bc/go.mod h1:Gu6n6vEn1hlyFUkjrvU+X1fdgaSXLoM9HKYYJqy1fsY=
+github.com/Telmate/proxmox-api-go v0.0.0-20241205214358-976ef5098918 h1:dfs2cVaIHtia0uq/Vo6y8pF0McWadzFPW7mAB46d1JI=
+github.com/Telmate/proxmox-api-go v0.0.0-20241205214358-976ef5098918/go.mod h1:Gu6n6vEn1hlyFUkjrvU+X1fdgaSXLoM9HKYYJqy1fsY=
 github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo=
 github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
 github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec=
diff --git a/proxmox/data_ha_group.go b/proxmox/data_ha_group.go
index cd0d35a8..33ed4a72 100644
--- a/proxmox/data_ha_group.go
+++ b/proxmox/data_ha_group.go
@@ -1,15 +1,16 @@
 package proxmox

 import (
+  "context"
   "sort"

-  "github.com/Telmate/proxmox-api-go/proxmox"
+  "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
   "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 )

 func DataHAGroup() *schema.Resource {
   return &schema.Resource{
-    Read: dataReadHAGroup,
+    ReadContext: dataReadHAGroup,
     Schema: map[string]*schema.Schema{
       "group_name": {
         Type: schema.TypeString,
@@ -42,17 +43,17 @@ func DataHAGroup() *schema.Resource {
   }
 }

-func dataReadHAGroup(data *schema.ResourceData, meta interface{}) (err error) {
+func dataReadHAGroup(ctx context.Context, data *schema.ResourceData, meta interface{}) diag.Diagnostics {
+
   pconf := meta.(*providerConfiguration)
   lock := pmParallelBegin(pconf)
   defer lock.unlock()

   client := pconf.Client

-  var group *proxmox.HAGroup
-  group, err = client.GetHAGroupByName(data.Get("group_name").(string))
+  group, err := client.GetHAGroupByName(ctx, data.Get("group_name").(string))
   if err != nil {
-    return err
+    return diag.FromErr(err)
   }

   nodes := group.Nodes
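Note: the data source above is the simplest instance of the migration pattern this changeset applies throughout the provider: each callback gains a context.Context, returns diag.Diagnostics instead of error, and wraps client errors with diag.FromErr while the context flows into the API call. A minimal, self-contained sketch of that pattern follows; the resource fields and the haClient interface are illustrative stand-ins, not code from this repository.

package example

import (
  "context"

  "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
  "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// haClient is a hypothetical stand-in for the subset of *pxapi.Client used here.
type haClient interface {
  GetHAGroupNames(ctx context.Context) ([]string, error)
}

func dataExampleHAGroups() *schema.Resource {
  return &schema.Resource{
    // ReadContext replaces the deprecated Read field; the SDK hands the
    // callback the context of the current Terraform operation.
    ReadContext: dataExampleHAGroupsRead,
    Schema: map[string]*schema.Schema{
      "names": {Type: schema.TypeList, Computed: true, Elem: &schema.Schema{Type: schema.TypeString}},
    },
  }
}

func dataExampleHAGroupsRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
  client := meta.(haClient)
  names, err := client.GetHAGroupNames(ctx) // the context flows into the API call
  if err != nil {
    return diag.FromErr(err) // errors become diagnostics instead of plain error returns
  }
  if err := d.Set("names", names); err != nil {
    return diag.FromErr(err)
  }
  d.SetId("ha-groups")
  return nil
}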
diff --git a/proxmox/provider.go b/proxmox/provider.go
index df84ba44..7d90d10e 100644
--- a/proxmox/provider.go
+++ b/proxmox/provider.go
@@ -1,6 +1,7 @@
 package proxmox

 import (
+  "context"
   "crypto/tls"
   "fmt"
   "net/url"
@@ -264,7 +265,8 @@ func providerConfigure(d *schema.ResourceData) (interface{}, error) {
   if err != nil {
     return nil, err
   }
-  permlist, err := client.GetUserPermissions(userID, "/")
+  ctx := context.Background()
+  permlist, err := client.GetUserPermissions(ctx, userID, "/")
   if err != nil {
     return nil, err
   }
@@ -348,7 +350,7 @@ func getClient(pm_api_url string,

   // User+Pass authentication
   if pm_user != "" && pm_password != "" {
-    err = client.Login(pm_user, pm_password, pm_otp)
+    err = client.Login(context.Background(), pm_user, pm_password, pm_otp)
   }

   // API authentication
@@ -366,7 +368,7 @@ func nextVmId(pconf *providerConfiguration) (nextId int, err error) {
   pconf.Mutex.Lock()
   defer pconf.Mutex.Unlock()
-  nextId, err = pconf.Client.GetNextID(0)
+  nextId, err = pconf.Client.GetNextID(context.Background(), 0)
   if err != nil {
     return 0, err
   }
diff --git a/proxmox/resource_cloud_init_disk.go b/proxmox/resource_cloud_init_disk.go
index 42123503..6fb7febd 100644
--- a/proxmox/resource_cloud_init_disk.go
+++ b/proxmox/resource_cloud_init_disk.go
@@ -131,7 +131,7 @@ func resourceCloudInitDiskCreate(ctx context.Context, d *schema.ResourceData, m
   }

   fileName := fmt.Sprintf("tf-ci-%s.iso", d.Get("name").(string))
-  err = client.Upload(d.Get("pve_node").(string), d.Get("storage").(string), isoContentType, fileName, r)
+  err = client.Upload(ctx, d.Get("pve_node").(string), d.Get("storage").(string), isoContentType, fileName, r)
   if err != nil {
     return diag.FromErr(err)
   }
@@ -154,7 +154,7 @@ func resourceCloudInitDiskRead(ctx context.Context, d *schema.ResourceData, m in
   vmRef := &proxmox.VmRef{}
   vmRef.SetNode(pveNode)
   vmRef.SetVmType("qemu")
-  storageContent, err := client.GetStorageContent(vmRef, d.Get("storage").(string))
+  storageContent, err := client.GetStorageContent(ctx, vmRef, d.Get("storage").(string))
   if err != nil {
     return diag.FromErr(err)
   }
@@ -182,7 +182,7 @@ func resourceCloudInitDiskDelete(ctx context.Context, d *schema.ResourceData, m
   storage := strings.SplitN(d.Id(), ":", 2)[0]
   isoURL := fmt.Sprintf("/nodes/%s/storage/%s/content/%s", d.Get("pve_node").(string), storage, d.Id())

-  err := client.Delete(isoURL)
+  err := client.Delete(ctx, isoURL)
   if err != nil {
     return diag.FromErr(err)
   }
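Passing the SDK-supplied ctx into calls like client.Upload and client.Delete means Terraform's own cancellation (for example an interrupted apply) can abort in-flight Proxmox API requests. The sketch below shows how a caller could additionally bound a long upload with a deadline derived from that same context; the uploader interface and the 10-minute limit are illustrative assumptions, not something this diff introduces.

package example

import (
  "context"
  "io"
  "time"
)

// uploader is a hypothetical stand-in for the subset of *pxapi.Client used here.
type uploader interface {
  Upload(ctx context.Context, node, storage, contentType, fileName string, r io.Reader) error
}

// uploadWithTimeout derives a bounded context from the one handed to the
// resource function, so a stalled upload fails instead of hanging forever.
func uploadWithTimeout(ctx context.Context, c uploader, node, storage, fileName string, r io.Reader) error {
  ctx, cancel := context.WithTimeout(ctx, 10*time.Minute) // illustrative limit
  defer cancel()
  return c.Upload(ctx, node, storage, "iso", fileName, r)
}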
diff --git a/proxmox/resource_lxc.go b/proxmox/resource_lxc.go
index d6d1fd8b..9d3f0c59 100644
--- a/proxmox/resource_lxc.go
+++ b/proxmox/resource_lxc.go
@@ -1,6 +1,7 @@
 package proxmox

 import (
+  "context"
   "fmt"
   "log"
   "strconv"
@@ -10,6 +11,7 @@ import (
   pxapi "github.com/Telmate/proxmox-api-go/proxmox"
   "github.com/Telmate/terraform-provider-proxmox/v2/proxmox/Internal/pxapi/guest/tags"
   "github.com/Telmate/terraform-provider-proxmox/v2/proxmox/Internal/util"
+  "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
   "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 )

@@ -18,9 +20,9 @@ var lxcResourceDef *schema.Resource
 // TODO update tag schema
 func resourceLxc() *schema.Resource {
   lxcResourceDef = &schema.Resource{
-    Create: resourceLxcCreate,
-    Read:   resourceLxcRead,
-    Update: resourceLxcUpdate,
+    CreateContext: resourceLxcCreate,
+    ReadContext:   resourceLxcRead,
+    UpdateContext: resourceLxcUpdate,
     DeleteContext: resourceVmQemuDelete,
     Importer: &schema.ResourceImporter{
       StateContext: schema.ImportStatePassthroughContext,
@@ -440,7 +442,7 @@ func resourceLxc() *schema.Resource {
   return lxcResourceDef
 }

-func resourceLxcCreate(d *schema.ResourceData, meta interface{}) error {
+func resourceLxcCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
   pconf := meta.(*providerConfiguration)
   lock := pmParallelBegin(pconf)

@@ -525,7 +527,7 @@ func resourceLxcCreate(d *schema.ResourceData, meta interface{}) error {
     nextid = vmID
   } else {
     if err != nil {
-      return err
+      return diag.FromErr(err)
     }
   }

@@ -536,10 +538,10 @@ func resourceLxcCreate(d *schema.ResourceData, meta interface{}) error {

     log.Print("[DEBUG][LxcCreate] cloning LXC")

-    err = config.CloneLxc(vmr, client)
+    err = config.CloneLxc(ctx, vmr, client)
     if err != nil {
-      return err
+      return diag.FromErr(err)
     }

     // Waiting for the clone to become ready and
@@ -549,13 +551,13 @@ func resourceLxcCreate(d *schema.ResourceData, meta interface{}) error {
     var config_post_clone *pxapi.ConfigLxc
     for {
       // Wait until we can actually retrieve the config from the cloned machine
-      config_post_clone, err = pxapi.NewConfigLxcFromApi(vmr, client)
+      config_post_clone, err = pxapi.NewConfigLxcFromApi(ctx, vmr, client)
       if config_post_clone != nil {
         break
         // to prevent an infinite loop we check for any other error
         // this error is actually fine because the clone is not ready yet
       } else if err.Error() != "vm locked, could not obtain config" {
-        return err
+        return diag.FromErr(err)
       }
       time.Sleep(5 * time.Second)
       log.Print("[DEBUG][LxcCreate] Clone still not ready, checking again")
@@ -564,34 +566,34 @@ func resourceLxcCreate(d *schema.ResourceData, meta interface{}) error {
       log.Print("[DEBUG][LxcCreate] Waiting for clone becoming ready")
     } else {
       log.Print("[DEBUG][LxcCreate] We must resize")
-      processDiskResize(config_post_clone.RootFs, config.RootFs, "rootfs", pconf, vmr)
+      processDiskResize(ctx, config_post_clone.RootFs, config.RootFs, "rootfs", pconf, vmr)
     }
-    config_post_resize, err := pxapi.NewConfigLxcFromApi(vmr, client)
+    config_post_resize, err := pxapi.NewConfigLxcFromApi(ctx, vmr, client)
     if err != nil {
-      return err
+      return diag.FromErr(err)
     }

     config.RootFs["size"] = config_post_resize.RootFs["size"]
     config.RootFs["volume"] = config_post_resize.RootFs["volume"]

     // Update all remaining stuff
-    err = config.UpdateConfig(vmr, client)
+    err = config.UpdateConfig(ctx, vmr, client)
     if err != nil {
-      return err
+      return diag.FromErr(err)
     }
   } else {
-    err = config.CreateLxc(vmr, client)
+    err = config.CreateLxc(ctx, vmr, client)
     if err != nil {
-      return err
+      return diag.FromErr(err)
     }
   }

   //Start LXC if start parameter is set to true
   if d.Get("start").(bool) {
     log.Print("[DEBUG][LxcCreate] starting LXC")
-    _, err := client.StartVm(vmr)
+    _, err := client.StartVm(ctx, vmr)
     if err != nil {
-      return err
+      return diag.FromErr(err)
     }
   } else {
     log.Print("[DEBUG][LxcCreate] start = false, not starting LXC")
@@ -601,11 +603,11 @@ func resourceLxcCreate(d *schema.ResourceData, meta interface{}) error {
   d.SetId(resourceId(targetNode, "lxc", vmr.VmId()))

   lock.unlock()
-  return resourceLxcRead(d, meta)
+  return resourceLxcRead(ctx, d, meta)
 }

-func resourceLxcUpdate(d *schema.ResourceData, meta interface{}) error {
+func resourceLxcUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
   pconf := meta.(*providerConfiguration)
   lock := pmParallelBegin(pconf)
   defer lock.unlock()
@@ -614,12 +616,12 @@ func resourceLxcUpdate(d *schema.ResourceData, meta interface{}) error {

   _, _, vmID, err := parseResourceId(d.Id())
   if err != nil {
-    return err
+    return diag.FromErr(err)
   }
   vmr := pxapi.NewVmRef(vmID)
-  _, err = client.GetVmInfo(vmr)
+  _, err = client.GetVmInfo(ctx, vmr)
   if err != nil {
-    return err
+    return diag.FromErr(err)
   }

   config := pxapi.NewConfigLxc()
@@ -677,7 +679,7 @@ func resourceLxcUpdate(d *schema.ResourceData, meta interface{}) error {
       newNetworks = append(newNetworks, network.(map[string]interface{}))
     }

-    processLxcNetworkChanges(oldNetworks, newNetworks, pconf, vmr)
+    processLxcNetworkChanges(ctx, oldNetworks, newNetworks, pconf, vmr)

     if len(newNetworks) > 0 {
       // Drop all the ids since they can't be sent to the API
@@ -699,7 +701,7 @@ func resourceLxcUpdate(d *schema.ResourceData, meta interface{}) error {
     oldRootFs := oldSet.([]interface{})[0].(map[string]interface{})
     newRootFs := newSet.([]interface{})[0].(map[string]interface{})

-    processLxcDiskChanges(DeviceToMap(oldRootFs, 0), DeviceToMap(newRootFs, 0), pconf, vmr)
+    processLxcDiskChanges(ctx, DeviceToMap(oldRootFs, 0), DeviceToMap(newRootFs, 0), pconf, vmr)
     config.RootFs = newRootFs
   }

@@ -707,7 +709,7 @@ func resourceLxcUpdate(d *schema.ResourceData, meta interface{}) error {
     oldSet, newSet := d.GetChange("mountpoint")
     oldMounts := DevicesListToMapByKey(oldSet.([]interface{}), "key")
     newMounts := DevicesListToMapByKey(newSet.([]interface{}), "key")
-    processLxcDiskChanges(oldMounts, newMounts, pconf, vmr)
+    processLxcDiskChanges(ctx, oldMounts, newMounts, pconf, vmr)

     lxcMountpoints := DevicesListToDevices(newSet.([]interface{}), "slot")
     config.Mountpoints = lxcMountpoints
@@ -715,9 +717,9 @@ func resourceLxcUpdate(d *schema.ResourceData, meta interface{}) error {

   // TODO: Detect changes requiring Reboot

-  err = config.UpdateConfig(vmr, client)
+  err = config.UpdateConfig(ctx, vmr, client)
   if err != nil {
-    return err
+    return diag.FromErr(err)
   }

   if d.HasChange("pool") {
@@ -729,41 +731,41 @@ func resourceLxcUpdate(d *schema.ResourceData, meta interface{}) error {
     vmr := pxapi.NewVmRef(vmID)
     vmr.SetPool(oldPool)

-    _, err := client.UpdateVMPool(vmr, newPool)
+    _, err := client.UpdateVMPool(ctx, vmr, newPool)
     if err != nil {
-      return err
+      return diag.FromErr(err)
     }
   }

   if d.HasChange("start") {
-    vmState, err := client.GetVmState(vmr)
+    vmState, err := client.GetVmState(ctx, vmr)
     if err == nil && vmState["status"] == "stopped" && d.Get("start").(bool) {
       log.Print("[DEBUG][LXCUpdate] starting LXC")
-      _, err = client.StartVm(vmr)
+      _, err = client.StartVm(ctx, vmr)
       if err != nil {
-        return err
+        return diag.FromErr(err)
       }
     } else if err == nil && vmState["status"] == "running" && !d.Get("start").(bool) {
       log.Print("[DEBUG][LXCUpdate] stopping LXC")
-      _, err = client.StopVm(vmr)
+      _, err = client.StopVm(ctx, vmr)
       if err != nil {
-        return err
+        return diag.FromErr(err)
       }
     } else if err != nil {
-      return err
+      return diag.FromErr(err)
     }
   }

   lock.unlock()
-  return resourceLxcRead(d, meta)
+  return resourceLxcRead(ctx, d, meta)
 }

-func resourceLxcRead(d *schema.ResourceData, meta interface{}) error {
-  return _resourceLxcRead(d, meta)
+func resourceLxcRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
+  return diag.FromErr(_resourceLxcRead(ctx, d, meta))
 }

-func _resourceLxcRead(d *schema.ResourceData, meta interface{}) error {
+func _resourceLxcRead(ctx context.Context, d *schema.ResourceData, meta interface{}) error {
   pconf := meta.(*providerConfiguration)
   lock := pmParallelBegin(pconf)
   defer lock.unlock()
@@ -774,11 +776,11 @@ func _resourceLxcRead(d *schema.ResourceData, meta interface{}) error {
     return err
   }
   vmr := pxapi.NewVmRef(vmID)
-  _, err = client.GetVmInfo(vmr)
+  _, err = client.GetVmInfo(ctx, vmr)
   if err != nil {
     return err
   }
-  config, err := pxapi.NewConfigLxcFromApi(vmr, client)
+  config, err := pxapi.NewConfigLxcFromApi(ctx, vmr, client)
   if err != nil {
     return err
   }
@@ -838,10 +840,10 @@ func _resourceLxcRead(d *schema.ResourceData, meta interface{}) error {
   }

   // Pool
-  pools, err := client.GetPoolList()
+  pools, err := client.GetPoolList(ctx)
   if err == nil {
     for _, poolInfo := range pools["data"].([]interface{}) {
-      poolContent, _ := client.GetPoolInfo(poolInfo.(map[string]interface{})["poolid"].(string))
+      poolContent, _ := client.GetPoolInfo(ctx, poolInfo.(map[string]interface{})["poolid"].(string))
       for _, member := range poolContent["members"].([]interface{}) {
         if member.(map[string]interface{})["type"] != "storage" {
           if vmID == int(member.(map[string]interface{})["vmid"].(float64)) {
@@ -900,6 +902,7 @@ func _resourceLxcRead(d *schema.ResourceData, meta interface{}) error {
 }

 func processLxcDiskChanges(
+  ctx context.Context,
   prevDiskSet KeyedDeviceMap, newDiskSet KeyedDeviceMap,
   pconf *providerConfiguration, vmr *pxapi.VmRef,
 ) error {
@@ -923,7 +926,7 @@ func processLxcDiskChanges(
     params := map[string]interface{}{}
     params["delete"] = strings.Join(deleteDiskKeys, ", ")
     if vmr.GetVmType() == "lxc" {
-      if _, err := pconf.Client.SetLxcConfig(vmr, params); err != nil {
+      if _, err := pconf.Client.SetLxcConfig(ctx, vmr, params); err != nil {
         return err
       }
     } else {
@@ -954,7 +957,7 @@ func processLxcDiskChanges(
     }
     if len(newParams) > 0 {
       if vmr.GetVmType() == "lxc" {
-        if _, err := pconf.Client.SetLxcConfig(vmr, newParams); err != nil {
+        if _, err := pconf.Client.SetLxcConfig(ctx, vmr, newParams); err != nil {
          return err
        }
       } else {
@@ -973,7 +976,7 @@ func processLxcDiskChanges(
       newStorage, ok := newDisk["storage"].(string)
       if ok && newStorage != prevDisk["storage"] {
         if vmr.GetVmType() == "lxc" {
-          _, err := pconf.Client.MoveLxcDisk(vmr, diskSlotName(prevDisk), newStorage)
+          _, err := pconf.Client.MoveLxcDisk(ctx, vmr, diskSlotName(prevDisk), newStorage)
           if err != nil {
             return err
           }
@@ -986,14 +989,14 @@ func processLxcDiskChanges(
       }

       // 3. Resize disks with different sizes
-      if err := processDiskResize(prevDisk, newDisk, diskName, pconf, vmr); err != nil {
+      if err := processDiskResize(ctx, prevDisk, newDisk, diskName, pconf, vmr); err != nil {
         return err
       }
     }
   }

   // Update Volume info
-  apiResult, err := pconf.Client.GetVmConfig(vmr)
+  apiResult, err := pconf.Client.GetVmConfig(ctx, vmr)
   if err != nil {
     return err
   }
@@ -1020,6 +1023,7 @@ func diskSlotName(disk pxapi.QemuDevice) string {
 }

 func processDiskResize(
+  ctx context.Context,
   prevDisk pxapi.QemuDevice, newDisk pxapi.QemuDevice,
   diskName string,
   pconf *providerConfiguration, vmr *pxapi.VmRef,
@@ -1027,7 +1031,7 @@ func processDiskResize(
   newSize, ok := newDisk["size"]
   if ok && newSize != prevDisk["size"] {
     log.Print("[DEBUG][diskResize] resizing disk " + diskName)
-    _, err := pconf.Client.ResizeQemuDiskRaw(vmr, diskName, newDisk["size"].(string))
+    _, err := pconf.Client.ResizeQemuDiskRaw(ctx, vmr, diskName, newDisk["size"].(string))
     if err != nil {
       return err
     }
@@ -1035,7 +1039,7 @@ func processDiskResize(
   return nil
 }

-func processLxcNetworkChanges(prevNetworks []map[string]interface{}, newNetworks []map[string]interface{}, pconf *providerConfiguration, vmr *pxapi.VmRef) error {
+func processLxcNetworkChanges(ctx context.Context, prevNetworks []map[string]interface{}, newNetworks []map[string]interface{}, pconf *providerConfiguration, vmr *pxapi.VmRef) error {
   delNetworks := make([]map[string]interface{}, 0)

   // Collect the IDs of networks that exist in `prevNetworks` but not in `newNetworks`.
@@ -1065,7 +1069,7 @@ func processLxcNetworkChanges(prevNetworks []map[string]interface{}, newNetworks
       "delete": strings.Join(deleteNetKeys, ", "),
     }
     if vmr.GetVmType() == "lxc" {
-      if _, err := pconf.Client.SetLxcConfig(vmr, params); err != nil {
+      if _, err := pconf.Client.SetLxcConfig(ctx, vmr, params); err != nil {
         return err
       }
     } else {
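The clone-wait loop in resourceLxcCreate above still sleeps unconditionally between retries even though a context is now available. Below is a hedged sketch of a context-aware variant of that loop; waitForConfig and its fetch parameter are illustrative names (fetch standing in for pxapi.NewConfigLxcFromApi), not part of this changeset.

package example

import (
  "context"
  "time"
)

// waitForConfig polls fetch until it returns a non-nil config, but gives up as
// soon as the context is cancelled instead of sleeping blindly between retries.
func waitForConfig[T any](ctx context.Context, fetch func(context.Context) (*T, error)) (*T, error) {
  for {
    cfg, err := fetch(ctx)
    if cfg != nil {
      return cfg, nil
    }
    // the "vm locked" error is expected while the clone settles; anything else is fatal
    if err != nil && err.Error() != "vm locked, could not obtain config" {
      return nil, err
    }
    select {
    case <-ctx.Done():
      return nil, ctx.Err() // Terraform cancelled the operation
    case <-time.After(5 * time.Second):
      // retry
    }
  }
}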
diff --git a/proxmox/resource_lxc_disk.go b/proxmox/resource_lxc_disk.go
index 0690d1f8..16792712 100644
--- a/proxmox/resource_lxc_disk.go
+++ b/proxmox/resource_lxc_disk.go
@@ -1,19 +1,21 @@
 package proxmox

 import (
+  "context"
   "fmt"
   "strings"

   pxapi "github.com/Telmate/proxmox-api-go/proxmox"
+  "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
   "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 )

 func resourceLxcDisk() *schema.Resource {
   return &schema.Resource{
-    Create: resourceLxcDiskCreate,
-    Read:   resourceLxcDiskRead,
-    Update: resourceLxcDiskUpdate,
-    Delete: resourceLxcDiskDelete,
+    CreateContext: resourceLxcDiskCreate,
+    ReadContext:   resourceLxcDiskRead,
+    UpdateContext: resourceLxcDiskUpdate,
+    DeleteContext: resourceLxcDiskDelete,
     Importer: &schema.ResourceImporter{
       StateContext: schema.ImportStatePassthroughContext,
@@ -108,7 +110,7 @@ func resourceLxcDisk() *schema.Resource {
   }
 }

-func resourceLxcDiskCreate(d *schema.ResourceData, meta interface{}) error {
+func resourceLxcDiskCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
   pconf := meta.(*providerConfiguration)
   lock := pmParallelBegin(pconf)

@@ -116,15 +118,15 @@ func resourceLxcDiskCreate(d *schema.ResourceData, meta interface{}) error {
   _, _, vmID, err := parseResourceId(d.Get("container").(string))
   if err != nil {
-    return err
+    return diag.FromErr(err)
   }
   client := pconf.Client
   vmr := pxapi.NewVmRef(vmID)
   vmr.SetVmType("lxc")
-  _, err = client.GetVmInfo(vmr)
+  _, err = client.GetVmInfo(ctx, vmr)
   if err != nil {
-    return err
+    return diag.FromErr(err)
   }

   disk := d.Get("").(map[string]interface{})
@@ -140,19 +142,19 @@ func resourceLxcDiskCreate(d *schema.ResourceData, meta interface{}) error {
   params := map[string]interface{}{}
   mpName := fmt.Sprintf("mp%v", d.Get("slot").(int))
   params[mpName] = pxapi.FormatDiskParam(disk)
-  exitStatus, err := pconf.Client.SetLxcConfig(vmr, params)
+  exitStatus, err := pconf.Client.SetLxcConfig(ctx, vmr, params)
   if err != nil {
-    return fmt.Errorf("error updating LXC Mountpoint: %v, error status: %s (params: %v)", err, exitStatus, params)
+    return diag.Errorf("error updating LXC Mountpoint: %v, error status: %s (params: %v)", err, exitStatus, params)
   }

-  if err = _resourceLxcDiskRead(d, meta); err != nil {
-    return err
+  if err = _resourceLxcDiskRead(ctx, d, meta); err != nil {
+    return diag.FromErr(err)
   }

   return nil
 }

-func resourceLxcDiskUpdate(d *schema.ResourceData, meta interface{}) error {
+func resourceLxcDiskUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
   pconf := meta.(*providerConfiguration)
   lock := pmParallelBegin(pconf)
   defer lock.unlock()
@@ -161,13 +163,13 @@ func resourceLxcDiskUpdate(d *schema.ResourceData, meta interface{}) error {
   _, _, vmID, err := parseResourceId(d.Get("container").(string))
   if err != nil {
-    return err
+    return diag.FromErr(err)
   }
   vmr := pxapi.NewVmRef(vmID)
-  _, err = client.GetVmInfo(vmr)
+  _, err = client.GetVmInfo(ctx, vmr)
   if err != nil {
-    return err
+    return diag.FromErr(err)
   }

   oldValue, newValue := d.GetChange("")
   oldDisk := extractDiskOptions(oldValue.(map[string]interface{}))
   newDisk := extractDiskOptions(newValue.(map[string]interface{}))

   // Apply Changes
-  err = processLxcDiskChanges(DeviceToMap(oldDisk, 0), DeviceToMap(newDisk, 0), pconf, vmr)
+  err = processLxcDiskChanges(ctx, DeviceToMap(oldDisk, 0), DeviceToMap(newDisk, 0), pconf, vmr)
   if err != nil {
-    return fmt.Errorf("error updating LXC Mountpoint: %v", err)
+    return diag.Errorf("error updating LXC Mountpoint: %v", err)
   }

-  return _resourceLxcDiskRead(d, meta)
+  return diag.FromErr(_resourceLxcDiskRead(ctx, d, meta))
 }

-func resourceLxcDiskRead(d *schema.ResourceData, meta interface{}) error {
+func resourceLxcDiskRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
   pconf := meta.(*providerConfiguration)
   lock := pmParallelBegin(pconf)
   defer lock.unlock()
-  return _resourceLxcDiskRead(d, meta)
+  return diag.FromErr(_resourceLxcDiskRead(ctx, d, meta))
 }

-func _resourceLxcDiskRead(d *schema.ResourceData, meta interface{}) error {
+func _resourceLxcDiskRead(ctx context.Context, d *schema.ResourceData, meta interface{}) error {
   pconf := meta.(*providerConfiguration)
   client := pconf.Client

@@ -200,12 +202,12 @@ func _resourceLxcDiskRead(d *schema.ResourceData, meta interface{}) error {
   }

   vmr := pxapi.NewVmRef(vmID)
-  _, err = client.GetVmInfo(vmr)
+  _, err = client.GetVmInfo(ctx, vmr)
   if err != nil {
     return err
   }

-  apiResult, err := client.GetVmConfig(vmr)
+  apiResult, err := client.GetVmConfig(ctx, vmr)
   if err != nil {
     return err
   }
@@ -231,27 +233,27 @@ func _resourceLxcDiskRead(d *schema.ResourceData, meta interface{}) error {
   return nil
 }

-func resourceLxcDiskDelete(d *schema.ResourceData, meta interface{}) error {
+func resourceLxcDiskDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
   pconf := meta.(*providerConfiguration)
   lock := pmParallelBegin(pconf)
   defer lock.unlock()

   _, _, vmID, err := parseResourceId(d.Get("container").(string))
   if err != nil {
-    return err
+    return diag.FromErr(err)
   }
   client := pconf.Client
   vmr := pxapi.NewVmRef(vmID)
-  _, err = client.GetVmInfo(vmr)
+  _, err = client.GetVmInfo(ctx, vmr)
   if err != nil {
-    return err
+    return diag.FromErr(err)
   }

   params := map[string]interface{}{}
   params["delete"] = fmt.Sprintf("mp%v", d.Get("slot").(int))
-  if exitStatus, err := pconf.Client.SetLxcConfig(vmr, params); err != nil {
-    return fmt.Errorf("error deleting LXC Mountpoint: %v, error status: %s (params: %v)", err, exitStatus, params)
+  if exitStatus, err := pconf.Client.SetLxcConfig(ctx, vmr, params); err != nil {
+    return diag.Errorf("error deleting LXC Mountpoint: %v, error status: %s (params: %v)", err, exitStatus, params)
   }

   return nil
diff --git a/proxmox/resource_pool.go b/proxmox/resource_pool.go
index 94349daf..47b54154 100644
--- a/proxmox/resource_pool.go
+++ b/proxmox/resource_pool.go
@@ -1,10 +1,12 @@
 package proxmox

 import (
+  "context"
   "fmt"

   pxapi "github.com/Telmate/proxmox-api-go/proxmox"
   "github.com/Telmate/terraform-provider-proxmox/v2/proxmox/Internal/util"
+  "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
   "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 )

@@ -14,10 +16,10 @@ var poolResourceDef *schema.Resource

 func resourcePool() *schema.Resource {
   poolResourceDef = &schema.Resource{
-    Create: resourcePoolCreate,
-    Read:   resourcePoolRead,
-    Update: resourcePoolUpdate,
-    Delete: resourcePoolDelete,
+    CreateContext: resourcePoolCreate,
+    ReadContext:   resourcePoolRead,
+    UpdateContext: resourcePoolUpdate,
+    DeleteContext: resourcePoolDelete,
     Importer: &schema.ResourceImporter{
       StateContext: schema.ImportStatePassthroughContext,
     },
@@ -40,7 +42,7 @@ func resourcePool() *schema.Resource {
   return poolResourceDef
 }

-func resourcePoolCreate(d *schema.ResourceData, meta interface{}) error {
+func resourcePoolCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
   pconf := meta.(*providerConfiguration)
   client := pconf.Client
   lock := pmParallelBegin(pconf)
@@ -51,24 +53,24 @@ func resourcePoolCreate(d *schema.ResourceData, meta interface{}) error {
   err := pxapi.ConfigPool{
     Name:    pxapi.PoolName(poolid),
     Comment: util.Pointer(d.Get("comment").(string)),
-  }.Create(client)
+  }.Create(ctx, client)
   if err != nil {
-    return err
+    return diag.FromErr(err)
   }

   d.SetId(clusterResourceId("pools", poolid))

-  return _resourcePoolRead(d, meta)
+  return diag.FromErr(_resourcePoolRead(ctx, d, meta))
 }

-func resourcePoolRead(d *schema.ResourceData, meta interface{}) error {
+func resourcePoolRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
   pconf := meta.(*providerConfiguration)
   lock := pmParallelBegin(pconf)
   defer lock.unlock()
-  return _resourcePoolRead(d, meta)
+  return diag.FromErr(_resourcePoolRead(ctx, d, meta))
 }

-func _resourcePoolRead(d *schema.ResourceData, meta interface{}) error {
+func _resourcePoolRead(ctx context.Context, d *schema.ResourceData, meta interface{}) error {
   pconf := meta.(*providerConfiguration)
   client := pconf.Client

@@ -83,7 +85,7 @@ func _resourcePoolRead(d *schema.ResourceData, meta interface{}) error {
   logger, _ := CreateSubLogger("resource_pool_read")
   logger.Info().Str("poolid", poolID).Msg("Reading configuration for poolid")

-  poolInfo, err := pool.Get(client)
+  poolInfo, err := pool.Get(ctx, client)
   if err != nil {
     d.SetId("")
     return nil
   }
@@ -100,7 +102,7 @@ func _resourcePoolRead(d *schema.ResourceData, meta interface{}) error {
   return nil
 }

-func resourcePoolUpdate(d *schema.ResourceData, meta interface{}) error {
+func resourcePoolUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
   pconf := meta.(*providerConfiguration)
   lock := pmParallelBegin(pconf)
   defer lock.unlock()
@@ -110,7 +112,7 @@ func resourcePoolUpdate(d *schema.ResourceData, meta interface{}) error {
   client := pconf.Client
   _, poolID, err := parseClusterResourceId(d.Id())
   if err != nil {
-    return err
+    return diag.FromErr(err)
   }

   logger.Info().Str("poolid", poolID).Msg("Starting update of the Pool resource")
@@ -119,16 +121,16 @@ func resourcePoolUpdate(d *schema.ResourceData, meta interface{}) error {
     err := pxapi.ConfigPool{
       Name:    pxapi.PoolName(poolID),
       Comment: util.Pointer(d.Get("comment").(string)),
-    }.Update(client)
+    }.Update(ctx, client)
     if err != nil {
-      return err
+      return diag.FromErr(err)
     }
   }

-  return _resourcePoolRead(d, meta)
+  return diag.FromErr(_resourcePoolRead(ctx, d, meta))
 }

-func resourcePoolDelete(d *schema.ResourceData, meta interface{}) error {
+func resourcePoolDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
   pconf := meta.(*providerConfiguration)
   lock := pmParallelBegin(pconf)
   defer lock.unlock()
@@ -137,10 +139,10 @@ func resourcePoolDelete(d *schema.ResourceData, meta interface{}) error {
   _, poolID, err := parseClusterResourceId(d.Id())
   if err != nil {
-    return err
+    return diag.FromErr(err)
   }

-  if err = pxapi.PoolName(poolID).Delete(client); err != nil {
-    return err
+  if err = pxapi.PoolName(poolID).Delete(ctx, client); err != nil {
+    return diag.FromErr(err)
   }

   return nil
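resource_pool.go keeps an internal, error-returning _resourcePoolRead helper and exposes thin Context wrappers that convert the error at the boundary with diag.FromErr (which yields nil diagnostics for a nil error). A compact sketch of that shape follows; readPool is a hypothetical stand-in for _resourcePoolRead, not code from this repository.

package example

import (
  "context"

  "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
  "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// readPool is the internal helper: it returns a plain error so Create and
// Update can reuse it without duplicating diagnostics handling.
func readPool(ctx context.Context, d *schema.ResourceData, meta interface{}) error {
  // ...fetch the pool with ctx and d.Id(), then d.Set(...) its fields...
  return nil
}

// resourcePoolExampleRead is what would be wired into ReadContext: it only
// adapts the helper's error into diag.Diagnostics at the boundary.
func resourcePoolExampleRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
  return diag.FromErr(readPool(ctx, d, meta))
}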
diff --git a/proxmox/resource_storage_iso.go b/proxmox/resource_storage_iso.go
index 6e74128e..bb0393d2 100644
--- a/proxmox/resource_storage_iso.go
+++ b/proxmox/resource_storage_iso.go
@@ -1,6 +1,7 @@
 package proxmox

 import (
+  "context"
   "fmt"
   "io"
   "net/http"
@@ -8,14 +9,15 @@ import (
   "strings"

   "github.com/Telmate/proxmox-api-go/proxmox"
+  "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
   "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 )

 func resourceStorageIso() *schema.Resource {
   return &schema.Resource{
-    Create: resourceStorageIsoCreate,
-    Read:   resourceStorageIsoRead,
-    Delete: resourceStorageIsoDelete,
+    CreateContext: resourceStorageIsoCreate,
+    ReadContext:   resourceStorageIsoRead,
+    DeleteContext: resourceStorageIsoDelete,

     Importer: &schema.ResourceImporter{
       StateContext: schema.ImportStatePassthroughContext,
@@ -57,7 +59,7 @@ func resourceStorageIso() *schema.Resource {
   }
 }

-func resourceStorageIsoCreate(d *schema.ResourceData, meta interface{}) error {
+func resourceStorageIsoCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
   pconf := meta.(*providerConfiguration)
   lock := pmParallelBegin(pconf)

@@ -71,22 +73,22 @@ func resourceStorageIsoCreate(d *schema.ResourceData, meta interface{}) error {
   client := pconf.Client
   file, err := os.CreateTemp(os.TempDir(), fileName)
   if err != nil {
-    return err
+    return diag.FromErr(err)
   }
   err = _downloadFile(url, file)
   if err != nil {
-    return err
+    return diag.FromErr(err)
   }
   file.Seek(0, 0)
   defer file.Close()
-  err = client.Upload(node, storage, isoContentType, fileName, file)
+  err = client.Upload(ctx, node, storage, isoContentType, fileName, file)
   if err != nil {
-    return err
+    return diag.FromErr(err)
   }

   volId := fmt.Sprintf("%s:%s/%s", storage, isoContentType, fileName)
   d.SetId(volId)
-  return resourceStorageIsoRead(d, meta)
+  return resourceStorageIsoRead(ctx, d, meta)
 }

 func _downloadFile(url string, file *os.File) error {
@@ -108,7 +110,7 @@ func _downloadFile(url string, file *os.File) error {
   return nil
 }

-func resourceStorageIsoRead(d *schema.ResourceData, meta interface{}) error {
+func resourceStorageIsoRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
   pconf := meta.(*providerConfiguration)
   client := pconf.Client

@@ -117,9 +119,9 @@ func resourceStorageIsoRead(d *schema.ResourceData, meta interface{}) error {
   vmRef := &proxmox.VmRef{}
   vmRef.SetNode(pveNode)
   vmRef.SetVmType(isoContentType)
-  storageContent, err := client.GetStorageContent(vmRef, d.Get("storage").(string))
+  storageContent, err := client.GetStorageContent(ctx, vmRef, d.Get("storage").(string))
   if err != nil {
-    return err
+    return diag.FromErr(err)
   }
   contents := storageContent["data"].([]interface{})
   for c := range contents {
@@ -139,7 +141,7 @@ func resourceStorageIsoRead(d *schema.ResourceData, meta interface{}) error {
   return nil
 }

-func resourceStorageIsoDelete(d *schema.ResourceData, meta interface{}) error {
+func resourceStorageIsoDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
   pconf := meta.(*providerConfiguration)
   client := pconf.Client
   lock := pmParallelBegin(pconf)
@@ -147,9 +149,9 @@ func resourceStorageIsoDelete(d *schema.ResourceData, meta interface{}) error {
   storage := strings.SplitN(d.Id(), ":", 2)[0]
   isoURL := fmt.Sprintf("/nodes/%s/storage/%s/content/%s", d.Get("pve_node").(string), storage, d.Id())

-  err := client.Delete(isoURL)
+  err := client.Delete(ctx, isoURL)
   if err != nil {
-    return err
+    return diag.FromErr(err)
   }
   return nil
 }
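One gap worth noting in the file above: _downloadFile still performs its HTTP GET without any context, so a hung download cannot be cancelled. Below is a sketch of a context-aware variant using net/http; the function name and its wiring into resourceStorageIsoCreate are assumptions, not part of this diff.

package example

import (
  "context"
  "fmt"
  "io"
  "net/http"
  "os"
)

// downloadFileCtx streams url into file and aborts when ctx is cancelled,
// because the context is attached to the HTTP request itself.
func downloadFileCtx(ctx context.Context, url string, file *os.File) error {
  req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
  if err != nil {
    return err
  }
  resp, err := http.DefaultClient.Do(req)
  if err != nil {
    return err
  }
  defer resp.Body.Close()
  if resp.StatusCode != http.StatusOK {
    return fmt.Errorf("unexpected status %s downloading %s", resp.Status, url)
  }
  _, err = io.Copy(file, resp.Body)
  return err
}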
diff --git a/proxmox/resource_vm_qemu.go b/proxmox/resource_vm_qemu.go
index f08882d4..4528c130 100755
--- a/proxmox/resource_vm_qemu.go
+++ b/proxmox/resource_vm_qemu.go
@@ -658,9 +658,9 @@ func resourceVmQemu() *schema.Resource {
   return thisResource
 }

-func getSourceVmr(client *pxapi.Client, name string, id int, targetNode string) (*pxapi.VmRef, error) {
+func getSourceVmr(ctx context.Context, client *pxapi.Client, name string, id int, targetNode string) (*pxapi.VmRef, error) {
   if name != "" {
-    sourceVmrs, err := client.GetVmRefsByName(name)
+    sourceVmrs, err := client.GetVmRefsByName(ctx, name)
     if err != nil {
       return nil, err
     }
@@ -673,7 +673,7 @@ func getSourceVmr(client *pxapi.Client, name string, id int, targetNode string)
     }
     return sourceVmr, nil
   } else if id != 0 {
-    return client.GetVmRefById(id)
+    return client.GetVmRefById(ctx, id)
   }

   return nil, errors.New("either 'clone' name or 'clone_id' must be specified")
@@ -758,7 +758,7 @@ func resourceVmQemuCreate(ctx context.Context, d *schema.ResourceData, meta inte
   }

   log.Printf("[DEBUG][QemuVmCreate] checking for duplicate name: %s", vmName)
-  dupVmr, _ := client.GetVmRefByName(vmName)
+  dupVmr, _ := client.GetVmRefByName(ctx, vmName)

   forceCreate := d.Get("force_create").(bool)

@@ -816,14 +816,14 @@ func resourceVmQemuCreate(ctx context.Context, d *schema.ResourceData, meta inte
     }
     config.FullClone = &fullClone

-    sourceVmr, err := getSourceVmr(client, d.Get("clone").(string), d.Get("clone_id").(int), vmr.Node())
+    sourceVmr, err := getSourceVmr(ctx, client, d.Get("clone").(string), d.Get("clone_id").(int), vmr.Node())
     if err != nil {
       return append(diags, diag.FromErr(err)...)
     }

     log.Print("[DEBUG][QemuVmCreate] cloning VM")
     logger.Debug().Str("vmid", d.Id()).Msgf("Cloning VM")
-    err = config.CloneVm(sourceVmr, vmr, client)
+    err = config.CloneVm(ctx, sourceVmr, vmr, client)
     if err != nil {
       return append(diags, diag.FromErr(err)...)
     }
@@ -831,7 +831,7 @@ func resourceVmQemuCreate(ctx context.Context, d *schema.ResourceData, meta inte
     time.Sleep(time.Duration(d.Get("clone_wait").(int)) * time.Second)

     log.Print("[DEBUG][QemuVmCreate] update VM after clone")
-    rebootRequired, err = config.Update(false, vmr, client)
+    rebootRequired, err = config.Update(ctx, false, vmr, client)
     if err != nil {
       // Set the id because when update config fail the vm is still created
       d.SetId(resourceId(targetNode, "qemu", vmr.VmId()))
@@ -861,13 +861,13 @@ func resourceVmQemuCreate(ctx context.Context, d *schema.ResourceData, meta inte
         return append(diags, diag.FromErr(fmt.Errorf("no network boot option matched in 'boot' config"))...)
       }
       log.Print("[DEBUG][QemuVmCreate] create with PXE")
-      err := config.Create(vmr, client)
+      err := config.Create(ctx, vmr, client)
       if err != nil {
         return append(diags, diag.FromErr(err)...)
       }
     } else {
       log.Print("[DEBUG][QemuVmCreate] create with ISO")
-      err := config.Create(vmr, client)
+      err := config.Create(ctx, vmr, client)
       if err != nil {
         return append(diags, diag.FromErr(err)...)
       }
@@ -875,9 +875,9 @@ func resourceVmQemuCreate(ctx context.Context, d *schema.ResourceData, meta inte
   } else {
     log.Printf("[DEBUG][QemuVmCreate] recycling VM vmId: %d", vmr.VmId())

-    client.StopVm(vmr)
+    client.StopVm(ctx, vmr)

-    rebootRequired, err = config.Update(false, vmr, client)
+    rebootRequired, err = config.Update(ctx, false, vmr, client)
     if err != nil {
       // Set the id because when update config fail the vm is still created
       d.SetId(resourceId(targetNode, "qemu", vmr.VmId()))
@@ -893,7 +893,7 @@ func resourceVmQemuCreate(ctx context.Context, d *schema.ResourceData, meta inte

   if d.Get("vm_state").(string) == "running" || d.Get("vm_state").(string) == "started" {
     log.Print("[DEBUG][QemuVmCreate] starting VM")
-    _, err := client.StartVm(vmr)
+    _, err := client.StartVm(ctx, vmr)
     if err != nil {
       return append(diags, diag.FromErr(err)...)
     }
@@ -932,7 +932,7 @@ func resourceVmQemuUpdate(ctx context.Context, d *schema.ResourceData, meta inte
   logger.Info().Int("vmid", vmID).Msg("Starting update of the VM resource")

   vmr := pxapi.NewVmRef(vmID)
-  _, err = client.GetVmInfo(vmr)
+  _, err = client.GetVmInfo(ctx, vmr)
   if err != nil {
     return diag.FromErr(err)
   }
@@ -1003,7 +1003,7 @@ func resourceVmQemuUpdate(ctx context.Context, d *schema.ResourceData, meta inte
   var rebootRequired bool
   automaticReboot := d.Get("automatic_reboot").(bool)
   // don't let the update function handel the reboot as it can't deal with cloud init changes yet
-  rebootRequired, err = config.Update(automaticReboot, vmr, client)
+  rebootRequired, err = config.Update(ctx, automaticReboot, vmr, client)
   if err != nil {
     return diag.FromErr(err)
   }
@@ -1094,7 +1094,7 @@ func resourceVmQemuUpdate(ctx context.Context, d *schema.ResourceData, meta inte

   // Try rebooting the VM is a reboot is required and automatic_reboot is
   // enabled. Attempt a graceful shutdown or if that fails, force power-off.
-  vmState, err := client.GetVmState(vmr)
+  vmState, err := client.GetVmState(ctx, vmr)
   if err != nil {
     return append(diags, diag.FromErr(err)...)
   }
@@ -1103,36 +1103,36 @@ func resourceVmQemuUpdate(ctx context.Context, d *schema.ResourceData, meta inte
   case stateStopped:
     if d.Get("vm_state").(string) == stateRunning { // start the VM
       log.Print("[DEBUG][QemuVmUpdate] starting VM to match `vm_state`")
-      if _, err = client.StartVm(vmr); err != nil {
+      if _, err = client.StartVm(ctx, vmr); err != nil {
         return append(diags, diag.FromErr(err)...)
       }
     }
   case stateRunning:
     if d.Get("vm_state").(string) == stateStopped { // shutdown the VM
       log.Print("[DEBUG][QemuVmUpdate] shutting down VM to match `vm_state`")
-      _, err = client.ShutdownVm(vmr)
+      _, err = client.ShutdownVm(ctx, vmr)
       // note: the default timeout is 3 min, configurable per VM: Options/Start-Shutdown Order/Shutdown timeout
       if err != nil {
         log.Print("[DEBUG][QemuVmUpdate] shutdown failed, stopping VM forcefully")
-        if _, err = client.StopVm(vmr); err != nil {
+        if _, err = client.StopVm(ctx, vmr); err != nil {
           return append(diags, diag.FromErr(err)...)
         }
       }
     } else if rebootRequired { // reboot the VM
       if automaticReboot { // automatic reboots is enabled
         log.Print("[DEBUG][QemuVmUpdate] rebooting the VM to match the configuration changes")
-        _, err = client.RebootVm(vmr)
+        _, err = client.RebootVm(ctx, vmr)
         // note: the default timeout is 3 min, configurable per VM: Options/Start-Shutdown Order/Shutdown timeout
         if err != nil {
           log.Print("[DEBUG][QemuVmUpdate] reboot failed, stopping VM forcefully")
-          if _, err := client.StopVm(vmr); err != nil {
+          if _, err := client.StopVm(ctx, vmr); err != nil {
             return append(diags, diag.FromErr(err)...)
           }
           // give sometime to proxmox to catchup
           dur := time.Duration(d.Get(schemaAdditionalWait).(int)) * time.Second
           log.Printf("[DEBUG][QemuVmUpdate] waiting for (%v) before starting the VM again", dur)
           time.Sleep(dur)
-          if _, err := client.StartVm(vmr); err != nil {
+          if _, err := client.StartVm(ctx, vmr); err != nil {
             return append(diags, diag.FromErr(err)...)
           }
         }
@@ -1187,7 +1187,7 @@ func resourceVmQemuRead(ctx context.Context, d *schema.ResourceData, meta interf
   }

   if len(targetNodes) == 0 {
-    _, err = client.GetVmInfo(vmr)
+    _, err = client.GetVmInfo(ctx, vmr)
     if err != nil {
       logger.Debug().Int("vmid", vmID).Err(err).Msg("failed to get vm info")
       d.SetId("")
@@ -1197,7 +1197,7 @@ func resourceVmQemuRead(ctx context.Context, d *schema.ResourceData, meta interf
   } else {
     for _, targetNode := range targetNodes {
       vmr.SetNode(targetNode)
-      _, err = client.GetVmInfo(vmr)
+      _, err = client.GetVmInfo(ctx, vmr)
       if err != nil {
         d.SetId("")
       }
@@ -1214,7 +1214,7 @@ func resourceVmQemuRead(ctx context.Context, d *schema.ResourceData, meta interf
     return nil
   }

-  config, err := pxapi.NewConfigQemuFromApi(vmr, client)
+  config, err := pxapi.NewConfigQemuFromApi(ctx, vmr, client)
   if err != nil {
     return diag.FromErr(err)
   }
@@ -1224,7 +1224,7 @@ func resourceVmQemuRead(ctx context.Context, d *schema.ResourceData, meta interf
     disk.Terraform_Unsafe(d, config.Disks, &ciDisk)
   }

-  vmState, err := client.GetVmState(vmr)
+  vmState, err := client.GetVmState(ctx, vmr)
   if err != nil {
     return diag.FromErr(err)
   }
@@ -1233,7 +1233,7 @@ func resourceVmQemuRead(ctx context.Context, d *schema.ResourceData, meta interf
   if vmState["status"] == "running" {
     log.Printf("[DEBUG] VM is running, checking the IP")
     // TODO when network interfaces are reimplemented check if we have an interface before getting the connection info
-    diags = append(diags, initConnInfo(d, client, vmr, config, ciDisk)...)
+    diags = append(diags, initConnInfo(ctx, d, client, vmr, config, ciDisk)...)
   } else {
     // Optional convenience attributes for provisioners
     err = d.Set("default_ipv4_address", nil)
@@ -1335,12 +1335,12 @@ func resourceVmQemuDelete(ctx context.Context, d *schema.ResourceData, meta inte
   client := pconf.Client
   vmId, _ := strconv.Atoi(path.Base(d.Id()))
   vmr := pxapi.NewVmRef(vmId)
-  vmState, err := client.GetVmState(vmr)
+  vmState, err := client.GetVmState(ctx, vmr)
   if err != nil {
     return diag.FromErr(err)
   }
   if vmState["status"] != "stopped" {
-    if _, err := client.StopVm(vmr); err != nil {
+    if _, err := client.StopVm(ctx, vmr); err != nil {
       return diag.FromErr(err)
     }

@@ -1348,7 +1348,7 @@ func resourceVmQemuDelete(ctx context.Context, d *schema.ResourceData, meta inte
     // ugly way to wait 5 minutes(300s)
     waited := 0
     for waited < 300 {
-      vmState, err := client.GetVmState(vmr)
+      vmState, err := client.GetVmState(ctx, vmr)
       if err == nil && vmState["status"] == "stopped" {
         break
       } else if err != nil {
@@ -1360,7 +1360,7 @@ func resourceVmQemuDelete(ctx context.Context, d *schema.ResourceData, meta inte
     }
   }

-  _, err = client.DeleteVm(vmr)
+  _, err = client.DeleteVm(ctx, vmr)
   return diag.FromErr(err)
 }

@@ -1552,7 +1552,7 @@ func UpdateDevicesSet(
   return devicesSet
 }

-func initConnInfo(d *schema.ResourceData, client *pxapi.Client, vmr *pxapi.VmRef, config *pxapi.ConfigQemu, hasCiDisk bool) diag.Diagnostics {
+func initConnInfo(ctx context.Context, d *schema.ResourceData, client *pxapi.Client, vmr *pxapi.VmRef, config *pxapi.ConfigQemu, hasCiDisk bool) diag.Diagnostics {
   logger, _ := CreateSubLogger("initConnInfo")
   var diags diag.Diagnostics
   // allow user to opt-out of setting the connection info for the resource
@@ -1587,7 +1587,7 @@ func initConnInfo(d *schema.ResourceData, client *pxapi.Client, vmr *pxapi.VmRef
   log.Printf("[DEBUG][initConnInfo] retries will end at %s", guestAgentWaitEnd)
   logger.Debug().Int("vmid", vmr.VmId()).Msgf("retrying for at most %v minutes before giving up", guestAgentTimeout)
   logger.Debug().Int("vmid", vmr.VmId()).Msgf("retries will end at %s", guestAgentWaitEnd)
-  IPs, agentDiags := getPrimaryIP(config.CloudInit, config.Networks, vmr, client, guestAgentWaitEnd, d.Get(schemaAdditionalWait).(int), d.Get(schemaAgentTimeout).(int), ciAgentEnabled, d.Get(schemaSkipIPv4).(bool), d.Get(schemaSkipIPv6).(bool), hasCiDisk)
+  IPs, agentDiags := getPrimaryIP(ctx, config.CloudInit, config.Networks, vmr, client, guestAgentWaitEnd, d.Get(schemaAdditionalWait).(int), d.Get(schemaAgentTimeout).(int), ciAgentEnabled, d.Get(schemaSkipIPv4).(bool), d.Get(schemaSkipIPv6).(bool), hasCiDisk)
   if len(agentDiags) > 0 {
     diags = append(diags, agentDiags...)
   }
@@ -1618,7 +1618,7 @@ func initConnInfo(d *schema.ResourceData, client *pxapi.Client, vmr *pxapi.VmRef
   return diags
 }

-func getPrimaryIP(cloudInit *pxapi.CloudInit, networks pxapi.QemuNetworkInterfaces, vmr *pxapi.VmRef, client *pxapi.Client, endTime time.Time, additionalWait, agentTimeout int, ciAgentEnabled, skipIPv4, skipIPv6, hasCiDisk bool) (primaryIPs, diag.Diagnostics) {
+func getPrimaryIP(ctx context.Context, cloudInit *pxapi.CloudInit, networks pxapi.QemuNetworkInterfaces, vmr *pxapi.VmRef, client *pxapi.Client, endTime time.Time, additionalWait, agentTimeout int, ciAgentEnabled, skipIPv4, skipIPv6, hasCiDisk bool) (primaryIPs, diag.Diagnostics) {
   logger, _ := CreateSubLogger("getPrimaryIP")

   // TODO allow the primary interface to be a different one than the first
@@ -1667,7 +1667,7 @@ func getPrimaryIP(cloudInit *pxapi.CloudInit, networks pxapi.QemuNetworkInterfac
   }
   for time.Now().Before(endTime) {
     var interfaces []pxapi.AgentNetworkInterface
-    interfaces, err = vmr.GetAgentInformation(client, false)
+    interfaces, err = vmr.GetAgentInformation(ctx, client, false)
     if err != nil {
       if !strings.Contains(err.Error(), ErrorGuestAgentNotRunning) {
         return primaryIPs{}, diag.FromErr(err)