From 198f08f64156fe36d47fcf8822ad99b6ab658e34 Mon Sep 17 00:00:00 2001 From: PJ Richardson <52798694+prichard77@users.noreply.github.com> Date: Wed, 19 Aug 2020 08:48:47 -0400 Subject: [PATCH] [RFR] Remove local VM fixtures and replace usage with global create_vm fixture. (#10229) * Remove new_vm fixture and replace usage with create_vm in test_vm_migrate.py * Update test_vm_ownership.py to use create_vm * Update TestControlOnQuadicons in test_vm_power_control.py for create_vm usage * Update vm_name fixture to use create_vm.name * Update the rest of test_vm_power_control.py to use create_vm * Update the rest of test_vm_reconfigure.py to use create_vm * Add create_vm back where rebase removed it * Update _create_vm to handle exception thrown by provider.refresh_provider_relationships() when using orphaned VMs * Update except so exceptions other than Provider collection empty will get raised * Update refresh_provider_relationships to use LookupError instead of Exception * Catch TimeOutError in cleanup_on_provider and log it --- cfme/common/provider.py | 2 +- cfme/common/vm.py | 8 +- cfme/fixtures/vm.py | 6 +- cfme/tests/infrastructure/test_vm_migrate.py | 27 +- .../tests/infrastructure/test_vm_ownership.py | 21 +- .../infrastructure/test_vm_power_control.py | 295 +++++++++--------- .../infrastructure/test_vm_reconfigure.py | 156 +++++---- 7 files changed, 231 insertions(+), 284 deletions(-) diff --git a/cfme/common/provider.py b/cfme/common/provider.py index bff6a98c79..a481986439 100644 --- a/cfme/common/provider.py +++ b/cfme/common/provider.py @@ -799,7 +799,7 @@ def refresh_provider_relationships(self, from_list_view=False, col[0].action.refresh() self.wait_for_relationship_refresh(wait, delay, refresh_delta) except IndexError: - raise Exception("Provider collection empty") + raise LookupError("Provider collection empty") @refresh_provider_relationships.variant('ui') def refresh_provider_relationships_ui(self, from_list_view=False, wait=0, delay=1, diff --git a/cfme/common/vm.py b/cfme/common/vm.py index a2cf9e45f1..f291eb4665 100644 --- a/cfme/common/vm.py +++ b/cfme/common/vm.py @@ -42,6 +42,7 @@ from cfme.utils.rest import assert_response from cfme.utils.update import Updateable from cfme.utils.virtual_machines import deploy_template +from cfme.utils.wait import TimedOutError from cfme.utils.wait import wait_for @@ -989,10 +990,13 @@ def cleanup_on_provider(self, handle_cleanup_exception=True): Helper method to avoid NotFoundError's during test case tear down. """ if self.exists_on_provider: - wait_for(self.mgmt.cleanup, handle_exception=handle_cleanup_exception, + try: + wait_for(self.mgmt.cleanup, handle_exception=handle_cleanup_exception, timeout=300) + except TimedOutError: + logger.exception(f'cleanup_on_provider: entity {self.name} timed out.') else: - logger.debug('cleanup_on_provider: entity "%s" does not exist', self.name) + logger.debug(f'cleanup_on_provider: entity {self.name} does not exist') def equal_drift_results(self, drift_section, section, *indexes): """Compares drift analysis results of a row specified by it's title text. 
diff --git a/cfme/fixtures/vm.py b/cfme/fixtures/vm.py index 4426855628..079728e008 100644 --- a/cfme/fixtures/vm.py +++ b/cfme/fixtures/vm.py @@ -49,7 +49,11 @@ def _create_vm(request, template_type, provider, vm_name): @request.addfinalizer def _cleanup(): vm_obj.cleanup_on_provider() - provider.refresh_provider_relationships() + try: + provider.refresh_provider_relationships() + except Exception as e: + if e.args[0] != "Provider collection empty": + raise vm_obj.mgmt.ensure_state(VmState.RUNNING) diff --git a/cfme/tests/infrastructure/test_vm_migrate.py b/cfme/tests/infrastructure/test_vm_migrate.py index 17b621521f..09cfd6d89d 100644 --- a/cfme/tests/infrastructure/test_vm_migrate.py +++ b/cfme/tests/infrastructure/test_vm_migrate.py @@ -5,7 +5,6 @@ from cfme.infrastructure.provider.rhevm import RHEVMProvider from cfme.infrastructure.provider.virtualcenter import VMwareProvider from cfme.utils.appliance.implementations.ui import navigate_to -from cfme.utils.generators import random_vm_name from cfme.utils.log_validator import LogValidator @@ -17,25 +16,7 @@ ] -@pytest.fixture() -def new_vm(setup_provider, provider): - """Fixture to provision appliance to the provider being tested if necessary""" - vm_name = random_vm_name(context='migrate') - try: - template_name = provider.data.templates.small_template.name - except AttributeError: - pytest.skip('Could not find templates.small_template.name in provider yaml: {}' - .format(provider.data)) - - vm = provider.appliance.collections.infra_vms.instantiate(vm_name, provider, template_name) - - if not provider.mgmt.does_vm_exist(vm_name): - vm.create_on_provider(find_in_cfme=True, allow_skip="default") - yield vm - vm.cleanup_on_provider() - - -def test_vm_migrate(appliance, new_vm, provider): +def test_vm_migrate(appliance, create_vm, provider): """Tests migration of a vm Metadata: @@ -47,15 +28,15 @@ def test_vm_migrate(appliance, new_vm, provider): initialEstimate: 1/4h """ # auto_test_services should exist to test migrate VM - view = navigate_to(new_vm, 'Details') + view = navigate_to(create_vm, 'Details') vm_host = view.entities.summary('Relationships').get_text_of('Host') hosts = [vds.name for vds in provider.hosts.all() if vds.name not in vm_host] if hosts: migrate_to = hosts[0] else: pytest.skip("There is only one host in the provider") - new_vm.migrate_vm("email@xyz.com", "first", "last", host=migrate_to) - request_description = new_vm.name + create_vm.migrate_vm("email@xyz.com", "first", "last", host=migrate_to) + request_description = create_vm.name cells = {'Description': request_description, 'Request Type': 'Migrate'} migrate_request = appliance.collections.requests.instantiate(request_description, cells=cells, partial_check=True) diff --git a/cfme/tests/infrastructure/test_vm_ownership.py b/cfme/tests/infrastructure/test_vm_ownership.py index 8501c19611..c411a8033e 100644 --- a/cfme/tests/infrastructure/test_vm_ownership.py +++ b/cfme/tests/infrastructure/test_vm_ownership.py @@ -8,7 +8,6 @@ from cfme.markers.env_markers.provider import ONE_PER_TYPE from cfme.rest.gen_data import vm as _vm from cfme.utils.appliance.implementations.ui import navigate_to -from cfme.utils.generators import random_vm_name from cfme.utils.rest import assert_response from cfme.utils.wait import wait_for @@ -113,21 +112,9 @@ def test_set_vm_owner(self, appliance, vm, from_detail): assert rest_vm.evm_owner.userid == "admin" -@pytest.fixture(scope='function') -def small_vm(provider, small_template): - vm = 
provider.appliance.collections.infra_vms.instantiate(random_vm_name(context='rename'), - provider, - small_template.name) - vm.create_on_provider(find_in_cfme=True, allow_skip="default") - vm.refresh_relationships() - yield vm - if vm.exists: - vm.cleanup_on_provider() - - @test_requirements.power @pytest.mark.provider([VMwareProvider], scope="function", selector=ONE_PER_TYPE) -def test_rename_vm(small_vm): +def test_rename_vm(create_vm): """Test for rename the VM. Polarion: @@ -144,9 +131,9 @@ def test_rename_vm(small_vm): 5. Click on submit 6. Check whether VM is renamed or not """ - view = navigate_to(small_vm, 'Details') - vm_name = small_vm.name - changed_vm = small_vm.rename(new_vm_name=fauxfactory.gen_alphanumeric(15, start="renamed_")) + view = navigate_to(create_vm, 'Details') + vm_name = create_vm.name + changed_vm = create_vm.rename(new_vm_name=fauxfactory.gen_alphanumeric(15, start="renamed_")) view.flash.wait_displayed(timeout=20) view.flash.assert_success_message('Rename of Virtual Machine "{vm_name}" has been initiated' .format(vm_name=vm_name)) diff --git a/cfme/tests/infrastructure/test_vm_power_control.py b/cfme/tests/infrastructure/test_vm_power_control.py index d36a2355d6..d20837e212 100644 --- a/cfme/tests/infrastructure/test_vm_power_control.py +++ b/cfme/tests/infrastructure/test_vm_power_control.py @@ -14,7 +14,6 @@ from cfme.rest.gen_data import users as _users from cfme.utils.appliance.implementations.ui import navigate_to from cfme.utils.blockers import BZ -from cfme.utils.generators import random_vm_name from cfme.utils.log import logger from cfme.utils.wait import TimedOutError from cfme.utils.wait import wait_for @@ -29,50 +28,24 @@ @pytest.fixture(scope='function') -def vm_name(): - return random_vm_name('pwr-c') +def vm_name(create_vm): + return create_vm.name @pytest.fixture(scope="function") -def testing_vm(appliance, provider, vm_name): - """Fixture to provision vm to the provider being tested""" - vm = appliance.collections.infra_vms.instantiate(vm_name, provider) - - if not provider.mgmt.does_vm_exist(vm.name): - logger.info("deploying %s on provider %s", vm.name, provider.key) - vm.create_on_provider(allow_skip="default", find_in_cfme=True) - yield vm - vm.cleanup_on_provider() - if_scvmm_refresh_provider(provider) - - -@pytest.fixture(scope="function") -def archived_vm(testing_vm): +def archived_vm(create_vm): """Fixture to archive testing VM""" - testing_vm.mgmt.delete() - testing_vm.wait_for_vm_state_change(desired_state='archived', timeout=720, - from_details=False, from_any_provider=True) + create_vm.mgmt.delete() + create_vm.wait_for_vm_state_change(desired_state='archived', timeout=720, + from_details=False, from_any_provider=True) @pytest.fixture(scope="function") -def orphaned_vm(provider, testing_vm): +def orphaned_vm(provider, create_vm): """Fixture to orphane VM by removing provider from CFME""" provider.delete_if_exists(cancel=False) - testing_vm.wait_for_vm_state_change(desired_state='orphaned', timeout=720, - from_details=False, from_any_provider=True) - - -@pytest.fixture(scope="function") -def testing_vm_tools(appliance, provider, vm_name, full_template): - """Fixture to provision vm with preinstalled tools to the provider being tested""" - vm = appliance.collections.infra_vms.instantiate(vm_name, provider, full_template.name) - - if not provider.mgmt.does_vm_exist(vm.name): - logger.info("deploying %s on provider %s", vm.name, provider.key) - vm.create_on_provider(allow_skip="default", find_in_cfme=True) - yield vm - 
vm.cleanup_on_provider() - if_scvmm_refresh_provider(provider) + create_vm.wait_for_vm_state_change(desired_state='orphaned', timeout=720, + from_details=False, from_any_provider=True) def if_scvmm_refresh_provider(provider): @@ -152,7 +125,7 @@ def _wait_for_tools_ok(): class TestControlOnQuadicons: - def test_power_off_cancel(self, testing_vm, ensure_vm_running, soft_assert): + def test_power_off_cancel(self, create_vm, ensure_vm_running, soft_assert): """Tests power off cancel Metadata: @@ -163,17 +136,17 @@ def test_power_off_cancel(self, testing_vm, ensure_vm_running, soft_assert): casecomponent: Infra initialEstimate: 1/10h """ - testing_vm.wait_for_vm_state_change(desired_state=testing_vm.STATE_ON, timeout=720) - testing_vm.power_control_from_cfme(option=testing_vm.POWER_OFF, cancel=True) - if_scvmm_refresh_provider(testing_vm.provider) + create_vm.wait_for_vm_state_change(desired_state=create_vm.STATE_ON, timeout=720) + create_vm.power_control_from_cfme(option=create_vm.POWER_OFF, cancel=True) + if_scvmm_refresh_provider(create_vm.provider) # TODO: assert no event. time.sleep(60) - vm_state = testing_vm.find_quadicon().data['state'] + vm_state = create_vm.find_quadicon().data['state'] soft_assert(vm_state == 'on') soft_assert( - testing_vm.mgmt.is_running, "vm not running") + create_vm.mgmt.is_running, "vm not running") - def test_power_off(self, appliance, testing_vm, ensure_vm_running, soft_assert): + def test_power_off(self, appliance, create_vm, ensure_vm_running, soft_assert): """Tests power off Polarion: @@ -183,19 +156,19 @@ def test_power_off(self, appliance, testing_vm, ensure_vm_running, soft_assert): caseimportance: high tags: power """ - testing_vm.wait_for_vm_state_change(desired_state=testing_vm.STATE_ON, timeout=720) - testing_vm.power_control_from_cfme(option=testing_vm.POWER_OFF, cancel=False) + create_vm.wait_for_vm_state_change(desired_state=create_vm.STATE_ON, timeout=720) + create_vm.power_control_from_cfme(option=create_vm.POWER_OFF, cancel=False) view = appliance.browser.create_view(BaseLoggedInPage) view.flash.assert_success_message(text='Stop initiated', partial=True) - if_scvmm_refresh_provider(testing_vm.provider) - testing_vm.wait_for_vm_state_change(desired_state=testing_vm.STATE_OFF, timeout=900) - vm_state = testing_vm.find_quadicon().data['state'] + if_scvmm_refresh_provider(create_vm.provider) + create_vm.wait_for_vm_state_change(desired_state=create_vm.STATE_OFF, timeout=900) + vm_state = create_vm.find_quadicon().data['state'] soft_assert(vm_state == 'off') - soft_assert(not testing_vm.mgmt.is_running, "vm running") + soft_assert(not create_vm.mgmt.is_running, "vm running") - def test_power_on_cancel(self, testing_vm, ensure_vm_stopped, soft_assert): + def test_power_on_cancel(self, create_vm, ensure_vm_stopped, soft_assert): """Tests power on cancel Polarion: @@ -205,16 +178,16 @@ def test_power_on_cancel(self, testing_vm, ensure_vm_stopped, soft_assert): caseimportance: high tags: power """ - testing_vm.wait_for_vm_state_change(desired_state=testing_vm.STATE_OFF, timeout=720) - testing_vm.power_control_from_cfme(option=testing_vm.POWER_ON, cancel=True) - if_scvmm_refresh_provider(testing_vm.provider) + create_vm.wait_for_vm_state_change(desired_state=create_vm.STATE_OFF, timeout=720) + create_vm.power_control_from_cfme(option=create_vm.POWER_ON, cancel=True) + if_scvmm_refresh_provider(create_vm.provider) time.sleep(60) - vm_state = testing_vm.find_quadicon().data['state'] + vm_state = create_vm.find_quadicon().data['state'] soft_assert(vm_state 
== 'off') - soft_assert(not testing_vm.mgmt.is_running, "vm running") + soft_assert(not create_vm.mgmt.is_running, "vm running") @pytest.mark.tier(1) - def test_power_on(self, appliance, testing_vm, ensure_vm_stopped, soft_assert): + def test_power_on(self, appliance, create_vm, ensure_vm_stopped, soft_assert): """Tests power on Metadata: @@ -227,22 +200,22 @@ def test_power_on(self, appliance, testing_vm, ensure_vm_stopped, soft_assert): caseimportance: high tags: power """ - testing_vm.wait_for_vm_state_change(desired_state=testing_vm.STATE_OFF, timeout=720) - testing_vm.power_control_from_cfme(option=testing_vm.POWER_ON, cancel=False) + create_vm.wait_for_vm_state_change(desired_state=create_vm.STATE_OFF, timeout=720) + create_vm.power_control_from_cfme(option=create_vm.POWER_ON, cancel=False) view = appliance.browser.create_view(BaseLoggedInPage) view.flash.assert_success_message(text='Start initiated', partial=True) - if_scvmm_refresh_provider(testing_vm.provider) - testing_vm.wait_for_vm_state_change(desired_state=testing_vm.STATE_ON, timeout=900) - vm_state = testing_vm.find_quadicon().data['state'] + if_scvmm_refresh_provider(create_vm.provider) + create_vm.wait_for_vm_state_change(desired_state=create_vm.STATE_ON, timeout=900) + vm_state = create_vm.find_quadicon().data['state'] soft_assert(vm_state == 'on') - soft_assert(testing_vm.mgmt.is_running, "vm not running") + soft_assert(create_vm.mgmt.is_running, "vm not running") class TestVmDetailsPowerControlPerProvider: - def test_power_off(self, appliance, testing_vm, ensure_vm_running, soft_assert): + def test_power_off(self, appliance, create_vm, ensure_vm_running, soft_assert): """Tests power off Metadata: @@ -255,27 +228,28 @@ def test_power_off(self, appliance, testing_vm, ensure_vm_running, soft_assert): caseimportance: high tags: power """ - testing_vm.wait_for_vm_state_change( - desired_state=testing_vm.STATE_ON, timeout=720, from_details=True) - view = navigate_to(testing_vm, "Details") + create_vm.wait_for_vm_state_change( + desired_state=create_vm.STATE_ON, timeout=720, from_details=True) + view = navigate_to(create_vm, "Details") last_boot_time = view.entities.summary("Power Management").get_text_of("Last Boot Time") - testing_vm.power_control_from_cfme(option=testing_vm.POWER_OFF, cancel=False, - from_details=True) + create_vm.power_control_from_cfme(option=create_vm.POWER_OFF, + cancel=False, + from_details=True) view.flash.assert_success_message(text='Stop initiated', partial=True) - if_scvmm_refresh_provider(testing_vm.provider) - testing_vm.wait_for_vm_state_change( - desired_state=testing_vm.STATE_OFF, timeout=720, from_details=True) - soft_assert(not testing_vm.mgmt.is_running, "vm running") + if_scvmm_refresh_provider(create_vm.provider) + create_vm.wait_for_vm_state_change( + desired_state=create_vm.STATE_OFF, timeout=720, from_details=True) + soft_assert(not create_vm.mgmt.is_running, "vm running") # BUG - https://bugzilla.redhat.com/show_bug.cgi?id=1101604 - if not testing_vm.provider.one_of(RHEVMProvider): + if not create_vm.provider.one_of(RHEVMProvider): new_last_boot_time = view.entities.summary("Power Management").get_text_of( "Last Boot Time") soft_assert(new_last_boot_time == last_boot_time, f"ui: {new_last_boot_time} should == orig: {last_boot_time}") - def test_power_on(self, appliance, testing_vm, ensure_vm_stopped, soft_assert): + def test_power_on(self, appliance, create_vm, ensure_vm_stopped, soft_assert): """Tests power on Metadata: @@ -288,21 +262,22 @@ def test_power_on(self, appliance, 
testing_vm, ensure_vm_stopped, soft_assert): caseimportance: high tags: power """ - testing_vm.wait_for_vm_state_change( - desired_state=testing_vm.STATE_OFF, timeout=720, from_details=True) - testing_vm.power_control_from_cfme(option=testing_vm.POWER_ON, cancel=False, - from_details=True) + create_vm.wait_for_vm_state_change( + desired_state=create_vm.STATE_OFF, timeout=720, from_details=True) + create_vm.power_control_from_cfme(option=create_vm.POWER_ON, + cancel=False, + from_details=True) view = appliance.browser.create_view(BaseLoggedInPage) view.flash.assert_success_message(text='Start initiated', partial=True) - if_scvmm_refresh_provider(testing_vm.provider) - testing_vm.wait_for_vm_state_change( - desired_state=testing_vm.STATE_ON, timeout=720, from_details=True) - soft_assert(testing_vm.mgmt.is_running, "vm not running") + if_scvmm_refresh_provider(create_vm.provider) + create_vm.wait_for_vm_state_change( + desired_state=create_vm.STATE_ON, timeout=720, from_details=True) + soft_assert(create_vm.mgmt.is_running, "vm not running") @pytest.mark.meta(automates=[BZ(1174858)]) - def test_suspend(self, appliance, testing_vm, ensure_vm_running, soft_assert): + def test_suspend(self, appliance, create_vm, ensure_vm_running, soft_assert): """Tests suspend Polarion: @@ -315,28 +290,28 @@ def test_suspend(self, appliance, testing_vm, ensure_vm_running, soft_assert): Bugzilla: 1174858 """ - testing_vm.wait_for_vm_state_change( - desired_state=testing_vm.STATE_ON, timeout=720, from_details=True) - view = navigate_to(testing_vm, "Details") + create_vm.wait_for_vm_state_change( + desired_state=create_vm.STATE_ON, timeout=720, from_details=True) + view = navigate_to(create_vm, "Details") last_boot_time = view.entities.summary("Power Management").get_text_of("Last Boot Time") - testing_vm.power_control_from_cfme(option=testing_vm.SUSPEND, - cancel=False, - from_details=True) + create_vm.power_control_from_cfme(option=create_vm.SUSPEND, + cancel=False, + from_details=True) view.flash.assert_success_message(text='Suspend initiated', partial=True) - if_scvmm_refresh_provider(testing_vm.provider) - testing_vm.wait_for_vm_state_change(desired_state=testing_vm.STATE_SUSPENDED, - timeout=450, - from_details=True) - soft_assert(testing_vm.mgmt.is_suspended, "vm not suspended") - if not testing_vm.provider.one_of(RHEVMProvider): + if_scvmm_refresh_provider(create_vm.provider) + create_vm.wait_for_vm_state_change(desired_state=create_vm.STATE_SUSPENDED, + timeout=450, + from_details=True) + soft_assert(create_vm.mgmt.is_suspended, "vm not suspended") + if not create_vm.provider.one_of(RHEVMProvider): new_last_boot_time = view.entities.summary("Power Management").get_text_of( "Last Boot Time") soft_assert(new_last_boot_time == last_boot_time, f"ui: {new_last_boot_time} should == orig: {last_boot_time}") - def test_start_from_suspend(self, appliance, testing_vm, ensure_vm_suspended, soft_assert): + def test_start_from_suspend(self, appliance, create_vm, ensure_vm_suspended, soft_assert): """Tests start from suspend Polarion: @@ -348,26 +323,27 @@ def test_start_from_suspend(self, appliance, testing_vm, ensure_vm_suspended, so """ try: - testing_vm.provider.refresh_provider_relationships() - testing_vm.wait_for_vm_state_change( - desired_state=testing_vm.STATE_SUSPENDED, timeout=450, from_details=True) + create_vm.provider.refresh_provider_relationships() + create_vm.wait_for_vm_state_change( + desired_state=create_vm.STATE_SUSPENDED, timeout=450, from_details=True) except TimedOutError: - if 
testing_vm.provider.one_of(RHEVMProvider): + if create_vm.provider.one_of(RHEVMProvider): logger.warning('working around bz1174858, ignoring timeout') else: raise - view = navigate_to(testing_vm, "Details") + view = navigate_to(create_vm, "Details") last_boot_time = view.entities.summary("Power Management").get_text_of("Last Boot Time") - testing_vm.power_control_from_cfme(option=testing_vm.POWER_ON, cancel=False, - from_details=True) + create_vm.power_control_from_cfme(option=create_vm.POWER_ON, + cancel=False, + from_details=True) view.flash.assert_success_message(text='Start initiated', partial=True) - if_scvmm_refresh_provider(testing_vm.provider) - testing_vm.wait_for_vm_state_change( - desired_state=testing_vm.STATE_ON, timeout=720, from_details=True) - wait_for_last_boot_timestamp_refresh(testing_vm, last_boot_time, timeout=600) - soft_assert(testing_vm.mgmt.is_running, "vm not running") + if_scvmm_refresh_provider(create_vm.provider) + create_vm.wait_for_vm_state_change( + desired_state=create_vm.STATE_ON, timeout=720, from_details=True) + wait_for_last_boot_timestamp_refresh(create_vm, last_boot_time, timeout=600) + soft_assert(create_vm.mgmt.is_running, "vm not running") def test_no_template_power_control(provider, soft_assert): @@ -428,7 +404,7 @@ def test_no_template_power_control(provider, soft_assert): ) ] ) -def test_no_power_controls_on_archived_vm(appliance, testing_vm, archived_vm, soft_assert): +def test_no_power_controls_on_archived_vm(appliance, create_vm, archived_vm, soft_assert): """ Ensures that no power button is displayed from details view of archived vm Polarion: @@ -445,12 +421,12 @@ def test_no_power_controls_on_archived_vm(appliance, testing_vm, archived_vm, so 1520489 1659340 """ - view = navigate_to(testing_vm, 'AnyProviderDetails', use_resetter=False) + view = navigate_to(create_vm, 'AnyProviderDetails', use_resetter=False) status = getattr(view.toolbar.power, "is_enabled") assert not status, "Power displayed in archived VM's details!" 
-def test_archived_vm_status(testing_vm, archived_vm): +def test_archived_vm_status(create_vm, archived_vm): """Tests archived vm status Metadata: @@ -463,11 +439,11 @@ def test_archived_vm_status(testing_vm, archived_vm): initialEstimate: 1/8h tags: power """ - vm_state = testing_vm.find_quadicon(from_any_provider=True).data['state'] + vm_state = create_vm.find_quadicon(from_any_provider=True).data['state'] assert (vm_state == 'archived') -def test_orphaned_vm_status(testing_vm, orphaned_vm): +def test_orphaned_vm_status(create_vm, orphaned_vm): """Tests orphaned vm status Polarion: @@ -476,11 +452,11 @@ def test_orphaned_vm_status(testing_vm, orphaned_vm): casecomponent: Infra tags: power """ - vm_state = testing_vm.find_quadicon(from_any_provider=True).data['state'] + vm_state = create_vm.find_quadicon(from_any_provider=True).data['state'] assert (vm_state == 'orphaned') -def test_vm_power_options_from_on(provider, soft_assert, testing_vm, ensure_vm_running): +def test_vm_power_options_from_on(provider, soft_assert, create_vm, ensure_vm_running): """Tests vm power options from on Metadata: @@ -491,13 +467,13 @@ def test_vm_power_options_from_on(provider, soft_assert, testing_vm, ensure_vm_r casecomponent: Infra initialEstimate: 1/4h """ - testing_vm.wait_for_vm_state_change( - desired_state=testing_vm.STATE_ON, timeout=720, from_details=True) - check_power_options(provider, soft_assert, testing_vm, testing_vm.STATE_ON) + create_vm.wait_for_vm_state_change( + desired_state=create_vm.STATE_ON, timeout=720, from_details=True) + check_power_options(provider, soft_assert, create_vm, create_vm.STATE_ON) @pytest.mark.meta(automates=[BZ(1724062)]) -def test_vm_power_options_from_off(provider, soft_assert, testing_vm, ensure_vm_stopped): +def test_vm_power_options_from_off(provider, soft_assert, create_vm, ensure_vm_stopped): """Tests vm power options from off Metadata: @@ -513,14 +489,15 @@ def test_vm_power_options_from_off(provider, soft_assert, testing_vm, ensure_vm_ """ # TODO(ghubale@redhat.com): Update this test case with power options(shutdown and restart guest) # for scvmm provider - testing_vm.wait_for_vm_state_change( - desired_state=testing_vm.STATE_OFF, timeout=720, from_details=True) - check_power_options(provider, soft_assert, testing_vm, testing_vm.STATE_OFF) + create_vm.wait_for_vm_state_change( + desired_state=create_vm.STATE_OFF, timeout=720, from_details=True) + check_power_options(provider, soft_assert, create_vm, create_vm.STATE_OFF) @pytest.mark.provider([VMwareProvider, RHEVMProvider], scope='function') @pytest.mark.meta(automates=[1571830, 1650506]) -def test_guest_os_reset(appliance, provider, testing_vm_tools, ensure_vm_running, soft_assert): +@pytest.mark.parametrize('create_vm', ['full_template'], indirect=True) +def test_guest_os_reset(appliance, provider, create_vm, ensure_vm_running, soft_assert): """Tests vm guest os reset Metadata: @@ -537,23 +514,23 @@ def test_guest_os_reset(appliance, provider, testing_vm_tools, ensure_vm_running 1650506 """ # TODO(ghubale@redhat.com): Update this test case for power operation(restart guest) for scvmm - wait_for_vm_tools(testing_vm_tools) - view = navigate_to(testing_vm_tools, "Details") + wait_for_vm_tools(create_vm) + view = navigate_to(create_vm, "Details") last_boot_time = view.entities.summary("Power Management").get_text_of("Last Boot Time") state_changed_on = view.entities.summary("Power Management").get_text_of("State Changed On") - testing_vm_tools.power_control_from_cfme( - option=testing_vm_tools.GUEST_RESTART, 
cancel=False, from_details=True) + create_vm.power_control_from_cfme( + option=create_vm.GUEST_RESTART, cancel=False, from_details=True) view.flash.assert_success_message(text='Restart Guest initiated', partial=True) if not (provider.one_of(RHEVMProvider) and BZ(1571830, forced_streams=["5.10", "5.11"]).blocks): soft_assert( - wait_for_last_boot_timestamp_refresh(testing_vm_tools, last_boot_time), + wait_for_last_boot_timestamp_refresh(create_vm, last_boot_time), "Last Boot Time value has not been refreshed", ) soft_assert( - ensure_state_changed_on_unchanged(testing_vm_tools, state_changed_on), + ensure_state_changed_on_unchanged(create_vm, state_changed_on), "Value of 'State Changed On' has changed after guest restart", ) - soft_assert(testing_vm_tools.mgmt.is_running, "vm not running") + soft_assert(create_vm.mgmt.is_running, "vm not running") @pytest.mark.meta(automates=[1723485, 1571895, 1650506]) @@ -561,7 +538,8 @@ def test_guest_os_reset(appliance, provider, testing_vm_tools, ensure_vm_running @pytest.mark.meta(blockers=[BZ(1723485, forced_streams=["5.11"], unblock=lambda provider: not (provider.one_of(RHEVMProvider) and not provider.version < 4.3))]) -def test_guest_os_shutdown(appliance, provider, testing_vm_tools, ensure_vm_running, soft_assert): +@pytest.mark.parametrize('create_vm', ['full_template'], indirect=True) +def test_guest_os_shutdown(appliance, provider, create_vm, ensure_vm_running, soft_assert): """Tests vm guest os reset Polarion: @@ -577,20 +555,20 @@ def test_guest_os_shutdown(appliance, provider, testing_vm_tools, ensure_vm_runn 1650506 """ # TODO(ghubale@redhat.com): Update this test case for power operation(shutdown guest) for scvmm - testing_vm_tools.wait_for_vm_state_change( - desired_state=testing_vm_tools.STATE_ON, timeout=720, from_details=True) - wait_for_vm_tools(testing_vm_tools) - view = navigate_to(testing_vm_tools, "Details") + create_vm.wait_for_vm_state_change( + desired_state=create_vm.STATE_ON, timeout=720, from_details=True) + wait_for_vm_tools(create_vm) + view = navigate_to(create_vm, "Details") last_boot_time = view.entities.summary("Power Management").get_text_of("Last Boot Time") - testing_vm_tools.power_control_from_cfme( - option=testing_vm_tools.GUEST_SHUTDOWN, cancel=False, from_details=True) + create_vm.power_control_from_cfme( + option=create_vm.GUEST_SHUTDOWN, cancel=False, from_details=True) view.flash.assert_success_message(text='Shutdown Guest initiated', partial=True) - testing_vm_tools.wait_for_vm_state_change( - desired_state=testing_vm_tools.STATE_OFF, timeout=720, from_details=True) + create_vm.wait_for_vm_state_change( + desired_state=create_vm.STATE_OFF, timeout=720, from_details=True) soft_assert( - not testing_vm_tools.mgmt.is_running, "vm running") + not create_vm.mgmt.is_running, "vm running") # Blocking this assertion for RHEV providers because of BZ(1571895) not fixed yet if not (BZ(1571895, forced_streams=["5.10", "5.11"]).blocks and provider.one_of(RHEVMProvider)): @@ -614,7 +592,7 @@ def new_user(request, appliance): @pytest.mark.tier(1) @pytest.mark.meta(automates=[1687597]) @pytest.mark.provider([VMwareProvider], selector=ONE_PER_TYPE) -def test_retire_vm_with_vm_user_role(new_user, appliance, testing_vm): +def test_retire_vm_with_vm_user_role(new_user, appliance, create_vm): """ Bugzilla: 1687597 @@ -633,28 +611,33 @@ def test_retire_vm_with_vm_user_role(new_user, appliance, testing_vm): """ # Log in with new user to retire the vm with new_user: - view = navigate_to(testing_vm.parent, "All") - 
view.entities.get_entity(name=testing_vm.name, surf_pages=True).ensure_checked() + view = navigate_to(create_vm.parent, "All") + view.entities.get_entity(name=create_vm.name, surf_pages=True).ensure_checked() assert view.toolbar.lifecycle.item_enabled("Retire selected items") - testing_vm.retire() - assert testing_vm.wait_for_vm_state_change(desired_state="retired", timeout=720, - from_details=True) + create_vm.retire() + assert create_vm.wait_for_vm_state_change(desired_state="retired", + timeout=720, + from_details=True) @pytest.fixture(params=['archived', 'orphaned']) -def archive_orphan_vm(request, provider, testing_vm): +def archive_orphan_vm(request, provider, create_vm): """This fixture is used to create archived or orphaned VM""" if request.param == "archived": # Archive VM by retiring it - testing_vm.mgmt.delete() - testing_vm.wait_for_vm_state_change(desired_state='archived', timeout=720, - from_details=False, from_any_provider=True) + create_vm.mgmt.delete() + create_vm.wait_for_vm_state_change(desired_state='archived', + timeout=720, + from_details=False, + from_any_provider=True) else: # Orphan VM by removing provider from CFME provider.delete_if_exists(cancel=False) - testing_vm.wait_for_vm_state_change(desired_state='orphaned', timeout=720, - from_details=False, from_any_provider=True) - yield request.param, testing_vm + create_vm.wait_for_vm_state_change(desired_state='orphaned', + timeout=720, + from_details=False, + from_any_provider=True) + yield request.param, create_vm @pytest.mark.meta(automates=[1655477, 1686015]) @@ -681,17 +664,17 @@ def test_power_options_on_archived_orphaned_vms_all_page(appliance, archive_orph 3. Select any VM and click on power option drop-down """ infra_vms = appliance.collections.infra_vms - state, testing_vm = archive_orphan_vm + state, create_vm = archive_orphan_vm if state == "archived": view = navigate_to(infra_vms, 'ArchivedAll') # Selecting particular archived vm - testing_vm.find_quadicon(from_archived_all=True).ensure_checked() + create_vm.find_quadicon(from_archived_all=True).ensure_checked() else: view = navigate_to(infra_vms, 'OrphanedAll') # Selecting particular orphaned vm - testing_vm.find_quadicon(from_orphaned_all=True).ensure_checked() + create_vm.find_quadicon(from_orphaned_all=True).ensure_checked() # After selecting particular archived/orphaned vm; 'Power' drop down gets enabled. # Reading all the options available in 'power' drop down diff --git a/cfme/tests/infrastructure/test_vm_reconfigure.py b/cfme/tests/infrastructure/test_vm_reconfigure.py index d9acfcfa63..4f3767bb5e 100644 --- a/cfme/tests/infrastructure/test_vm_reconfigure.py +++ b/cfme/tests/infrastructure/test_vm_reconfigure.py @@ -14,7 +14,6 @@ from cfme.utils.appliance import ViaUI from cfme.utils.appliance.implementations.ui import navigate_to from cfme.utils.blockers import BZ -from cfme.utils.generators import random_vm_name from cfme.utils.rest import assert_response from cfme.utils.wait import TimedOutError from cfme.utils.wait import wait_for @@ -58,38 +57,19 @@ def reconfigure_vm(vm, config): @pytest.fixture(scope='function') -def full_vm(appliance, provider, full_template): - """This fixture is function-scoped, because there is no un-ambiguous way how to search for - reconfigure request in UI in situation when you have two requests for the same reconfiguration - and for the same VM name. This happens if you run test_vm_reconfig_add_remove_hw_cold and then - test_vm_reconfig_add_remove_hw_hot or vice versa. 
Making thix fixture function-scoped will - ensure that the VM under test has a different name each time so the reconfigure requests - are unique as a result.""" - vm = appliance.collections.infra_vms.instantiate(random_vm_name(context='reconfig'), - provider, - full_template.name) - vm.create_on_provider(find_in_cfme=True, allow_skip="default") - vm.refresh_relationships() - - yield vm - - vm.cleanup_on_provider() - - -@pytest.fixture(scope='function') -def ensure_vm_stopped(full_vm): - if full_vm.is_pwr_option_available_in_cfme(full_vm.POWER_OFF): - full_vm.mgmt.ensure_state(VmState.STOPPED) - full_vm.wait_for_vm_state_change(full_vm.STATE_OFF) +def ensure_vm_stopped(create_vm): + if create_vm.is_pwr_option_available_in_cfme(create_vm.POWER_OFF): + create_vm.mgmt.ensure_state(VmState.STOPPED) + create_vm.wait_for_vm_state_change(create_vm.STATE_OFF) else: raise Exception("Unknown power state - unable to continue!") @pytest.fixture(scope='function') -def ensure_vm_running(full_vm): - if full_vm.is_pwr_option_available_in_cfme(full_vm.POWER_ON): - full_vm.mgmt.ensure_state(VmState.RUNNING) - full_vm.wait_for_vm_state_change(full_vm.STATE_ON) +def ensure_vm_running(create_vm): + if create_vm.is_pwr_option_available_in_cfme(create_vm.POWER_ON): + create_vm.mgmt.ensure_state(VmState.RUNNING) + create_vm.wait_for_vm_state_change(create_vm.STATE_ON) else: raise Exception("Unknown power state - unable to continue!") @@ -109,17 +89,17 @@ def _vm_state(vm, state): @pytest.fixture(params=["cold", "hot"]) -def vm_state(request, full_vm): - _vm_state(full_vm, request.param) +def vm_state(request, create_vm): + _vm_state(create_vm, request.param) return request.param @pytest.fixture(scope='function') -def enable_hot_plugin(provider, full_vm, ensure_vm_stopped): +def enable_hot_plugin(provider, create_vm, ensure_vm_stopped): # Operation on Provider side # Hot plugin enable only needs for Vsphere Provider if provider.one_of(VMwareProvider): - vm = provider.mgmt.get_vm(full_vm.name) + vm = provider.mgmt.get_vm(create_vm.name) vm.cpu_hot_plug = True vm.memory_hot_plug = True @@ -164,7 +144,8 @@ def _is_succeeded(reconfig_request): @pytest.mark.parametrize('change_type', ['cores_per_socket', 'sockets', 'memory']) -def test_vm_reconfig_add_remove_hw_cold(provider, full_vm, ensure_vm_stopped, change_type): +@pytest.mark.parametrize('create_vm', ['full_template'], indirect=True) +def test_vm_reconfig_add_remove_hw_cold(provider, create_vm, ensure_vm_stopped, change_type): """ Polarion: assignee: nansari @@ -172,19 +153,20 @@ def test_vm_reconfig_add_remove_hw_cold(provider, full_vm, ensure_vm_stopped, ch initialEstimate: 1/3h tags: reconfigure """ - orig_config = full_vm.configuration.copy() + orig_config = create_vm.configuration.copy() new_config = prepare_new_config(orig_config, change_type) # Apply new config - reconfigure_vm(full_vm, new_config) + reconfigure_vm(create_vm, new_config) # Revert back to original config - reconfigure_vm(full_vm, orig_config) + reconfigure_vm(create_vm, orig_config) @pytest.mark.parametrize('disk_type', ['thin', 'thick']) @pytest.mark.parametrize( 'disk_mode', ['persistent', 'independent_persistent', 'independent_nonpersistent']) +@pytest.mark.parametrize('create_vm', ['full_template'], indirect=True) # Disk modes cannot be specified when adding disk to VM in RHV provider @pytest.mark.uncollectif(lambda disk_mode, provider: disk_mode != 'persistent' and provider.one_of(RHEVMProvider), @@ -193,7 +175,7 @@ def test_vm_reconfig_add_remove_hw_cold(provider, full_vm, 
ensure_vm_stopped, ch blockers=[BZ(1692801, forced_streams=['5.10'], unblock=lambda provider: not provider.one_of(RHEVMProvider))] ) -def test_vm_reconfig_add_remove_disk(provider, full_vm, vm_state, disk_type, disk_mode): +def test_vm_reconfig_add_remove_disk(provider, create_vm, vm_state, disk_type, disk_mode): """ Polarion: assignee: nansari @@ -210,37 +192,38 @@ def test_vm_reconfig_add_remove_disk(provider, full_vm, vm_state, disk_type, dis 5. Check the count in VM details page 6. Remove the disk and Check the count in VM details page """ - orig_config = full_vm.configuration.copy() + orig_config = create_vm.configuration.copy() new_config = orig_config.copy() new_config.add_disk( size=500, size_unit='MB', type=disk_type, mode=disk_mode) - add_disk_request = full_vm.reconfigure(new_config) + add_disk_request = create_vm.reconfigure(new_config) # Add disk request verification wait_for(add_disk_request.is_succeeded, timeout=360, delay=45, message="confirm that disk was added") # Add disk UI verification wait_for( - lambda: full_vm.configuration.num_disks == new_config.num_disks, timeout=360, delay=45, - fail_func=full_vm.refresh_relationships, + lambda: create_vm.configuration.num_disks == new_config.num_disks, timeout=360, delay=45, + fail_func=create_vm.refresh_relationships, message="confirm that disk was added") msg = "Disk wasn't added to VM config" - assert full_vm.configuration.num_disks == new_config.num_disks, msg + assert create_vm.configuration.num_disks == new_config.num_disks, msg - remove_disk_request = full_vm.reconfigure(orig_config) + remove_disk_request = create_vm.reconfigure(orig_config) # Remove disk request verification wait_for(remove_disk_request.is_succeeded, timeout=360, delay=45, message="confirm that previously-added disk was removed") # Remove disk UI verification wait_for( - lambda: full_vm.configuration.num_disks == orig_config.num_disks, timeout=360, delay=45, - fail_func=full_vm.refresh_relationships, + lambda: create_vm.configuration.num_disks == orig_config.num_disks, timeout=360, delay=45, + fail_func=create_vm.refresh_relationships, message="confirm that previously-added disk was removed") msg = "Disk wasn't removed from VM config" - assert full_vm.configuration.num_disks == orig_config.num_disks, msg + assert create_vm.configuration.num_disks == orig_config.num_disks, msg -def test_reconfig_vm_negative_cancel(provider, full_vm, ensure_vm_stopped): +@pytest.mark.parametrize('create_vm', ['full_template'], indirect=True) +def test_reconfig_vm_negative_cancel(provider, create_vm, ensure_vm_stopped): """ Cancel reconfiguration changes Polarion: @@ -249,7 +232,7 @@ def test_reconfig_vm_negative_cancel(provider, full_vm, ensure_vm_stopped): initialEstimate: 1/3h tags: reconfigure """ - config_vm = full_vm.configuration.copy() + config_vm = create_vm.configuration.copy() # Some changes in vm reconfigure before cancel config_vm.hw.cores_per_socket = config_vm.hw.cores_per_socket + 1 @@ -259,14 +242,15 @@ def test_reconfig_vm_negative_cancel(provider, full_vm, ensure_vm_stopped): config_vm.add_disk( size=5, size_unit='GB', type='thin', mode='persistent') - full_vm.reconfigure(config_vm, cancel=True) + create_vm.reconfigure(config_vm, cancel=True) @pytest.mark.meta( blockers=[BZ(1697967, unblock=lambda provider: not provider.one_of(RHEVMProvider))]) @pytest.mark.parametrize('change_type', ['sockets', 'memory']) +@pytest.mark.parametrize('create_vm', ['full_template'], indirect=True) def test_vm_reconfig_add_remove_hw_hot( - provider, full_vm, 
enable_hot_plugin, ensure_vm_running, change_type): + provider, create_vm, enable_hot_plugin, ensure_vm_running, change_type): """Change number of CPU sockets and amount of memory while VM is running. Changing number of cores per socket on running VM is not supported by RHV. @@ -276,18 +260,18 @@ def test_vm_reconfig_add_remove_hw_hot( initialEstimate: 1/4h tags: reconfigure """ - orig_config = full_vm.configuration.copy() + orig_config = create_vm.configuration.copy() new_config = prepare_new_config(orig_config, change_type) assert vars(orig_config.hw) != vars(new_config.hw) # Apply new config - reconfigure_vm(full_vm, new_config) + reconfigure_vm(create_vm, new_config) - assert vars(full_vm.configuration.hw) == vars(new_config.hw) + assert vars(create_vm.configuration.hw) == vars(new_config.hw) # Revert back to original config only supported by RHV if provider.one_of(RHEVMProvider): - reconfigure_vm(full_vm, orig_config) + reconfigure_vm(create_vm, orig_config) @pytest.mark.provider([VMwareProvider]) @@ -295,10 +279,11 @@ def test_vm_reconfig_add_remove_hw_hot( @pytest.mark.parametrize('disk_mode', ['persistent', 'independent_persistent', 'independent_nonpersistent']) +@pytest.mark.parametrize('create_vm', ['full_template'], indirect=True) @pytest.mark.uncollectif(lambda disk_mode, vm_state: disk_mode == 'independent_nonpersistent' and vm_state == 'hot', reason='Disk resize not supported for hot vm independent_nonpersistent') -def test_vm_reconfig_resize_disk(appliance, full_vm, vm_state, disk_type, disk_mode): +def test_vm_reconfig_resize_disk(appliance, create_vm, vm_state, disk_type, disk_mode): """ Resize the disk while VM is running and not running Polarion: assignee: nansari @@ -308,7 +293,7 @@ def test_vm_reconfig_resize_disk(appliance, full_vm, vm_state, disk_type, disk_m casecomponent: Infra """ # get initial disks for later comparison - initial_disks = [disk.filename for disk in full_vm.configuration.disks] + initial_disks = [disk.filename for disk in create_vm.configuration.disks] add_data = [ { "disk_size_in_mb": 20, @@ -320,18 +305,18 @@ def test_vm_reconfig_resize_disk(appliance, full_vm, vm_state, disk_type, disk_m } ] # disk will be added to the VM via REST - vm_reconfig_via_rest(appliance, "disk_add", full_vm.rest_api_entity.id, add_data) + vm_reconfig_via_rest(appliance, "disk_add", create_vm.rest_api_entity.id, add_data) # assert the new disk was added assert wait_for( - lambda: full_vm.configuration.num_disks > len(initial_disks), - fail_func=full_vm.refresh_relationships, + lambda: create_vm.configuration.num_disks > len(initial_disks), + fail_func=create_vm.refresh_relationships, delay=5, timeout=200, ) # there will always be 2 disks after the disk has been added - disks_present = [disk.filename for disk in full_vm.configuration.disks] + disks_present = [disk.filename for disk in create_vm.configuration.disks] # get the newly added disk try: disk_added = list(set(disks_present) - set(initial_disks))[0] @@ -340,16 +325,16 @@ def test_vm_reconfig_resize_disk(appliance, full_vm, vm_state, disk_type, disk_m # resize the disk disk_size = 500 - new_config = full_vm.configuration.copy() + new_config = create_vm.configuration.copy() new_config.resize_disk(size_unit='MB', size=disk_size, filename=disk_added) - resize_disk_request = full_vm.reconfigure(new_configuration=new_config) + resize_disk_request = create_vm.reconfigure(new_configuration=new_config) # Resize disk request verification wait_for(resize_disk_request.is_succeeded, timeout=360, delay=45, message="confirm 
that disk was Resize") # assert the new disk size was added - view = navigate_to(full_vm, 'Reconfigure') + view = navigate_to(create_vm, 'Reconfigure') assert int(view.disks_table.row(name=disk_added)["Size"].text) == disk_size @@ -360,7 +345,8 @@ def test_vm_reconfig_resize_disk(appliance, full_vm, vm_state, disk_type, disk_m @pytest.mark.parametrize('disk_mode', ['persistent', 'independent_persistent', 'independent_nonpersistent']) -def test_vm_reconfig_resize_disk_snapshot(request, disk_type, disk_mode, full_vm, memory=False): +@pytest.mark.parametrize('create_vm', ['full_template'], indirect=True) +def test_vm_reconfig_resize_disk_snapshot(request, disk_type, disk_mode, create_vm, memory=False): """ Bugzilla: @@ -388,12 +374,12 @@ def test_vm_reconfig_resize_disk_snapshot(request, disk_type, disk_mode, full_vm name=fauxfactory.gen_alphanumeric(start="snap_"), description=fauxfactory.gen_alphanumeric(start="desc_"), memory=memory, - parent_vm=full_vm + parent_vm=create_vm ) snapshot.create() request.addfinalizer(snapshot.delete) - view = navigate_to(full_vm, 'Reconfigure') + view = navigate_to(create_vm, 'Reconfigure') row = next(r for r in view.disks_table.rows()) # Delete button should enabled @@ -410,7 +396,8 @@ def test_vm_reconfig_resize_disk_snapshot(request, disk_type, disk_mode, full_vm ["DPortGroup", "VM Network", "Management Network", "VMkernel"], ids=["DPortGroup", "VmNetwork", "MgmtNetwork", "VmKernel"], ) -def test_vm_reconfig_add_remove_network_adapters(request, adapters_type, full_vm): +@pytest.mark.parametrize('create_vm', ['full_template'], indirect=True) +def test_vm_reconfig_add_remove_network_adapters(request, adapters_type, create_vm): """ Polarion: assignee: nansari @@ -426,37 +413,37 @@ def test_vm_reconfig_add_remove_network_adapters(request, adapters_type, full_vm 4. Check the changes in VM reconfiguration page 5. 
Remove the Adapters """ - orig_config = full_vm.configuration.copy() + orig_config = create_vm.configuration.copy() # Create new configuration with new network adapter new_config = orig_config.copy() new_config.add_network_adapter( f"Network adapter {orig_config.num_network_adapters + 1}", vlan=adapters_type ) - add_adapter_request = full_vm.reconfigure(new_config) + add_adapter_request = create_vm.reconfigure(new_config) add_adapter_request.wait_for_request(method="ui") request.addfinalizer(add_adapter_request.remove_request) # Verify network adapter added or not wait_for( - lambda: full_vm.configuration.num_network_adapters == new_config.num_network_adapters, + lambda: create_vm.configuration.num_network_adapters == new_config.num_network_adapters, timeout=120, delay=10, - fail_func=full_vm.refresh_relationships, + fail_func=create_vm.refresh_relationships, message="confirm that network adapter was added", ) # Remove network adapter - remove_adapter_request = full_vm.reconfigure(orig_config) + remove_adapter_request = create_vm.reconfigure(orig_config) remove_adapter_request.wait_for_request(method="ui") request.addfinalizer(remove_adapter_request.remove_request) # Verify network adapter removed or not wait_for( - lambda: full_vm.configuration.num_network_adapters == orig_config.num_network_adapters, + lambda: create_vm.configuration.num_network_adapters == orig_config.num_network_adapters, timeout=120, delay=10, - fail_func=full_vm.refresh_relationships, + fail_func=create_vm.refresh_relationships, message="confirm that network adapter was added", ) @@ -570,7 +557,8 @@ def vm_reconfig_via_rest(appliance, config_type, vm_id, config_data): @test_requirements.rest @pytest.mark.tier(3) -def test_vm_disk_reconfig_via_rest(appliance, full_vm): +@pytest.mark.parametrize('create_vm', ['full_template'], indirect=True) +def test_vm_disk_reconfig_via_rest(appliance, create_vm): """ Polarion: assignee: pvala @@ -593,9 +581,9 @@ def test_vm_disk_reconfig_via_rest(appliance, full_vm): 1691635 1692801 """ - vm_id = appliance.rest_api.collections.vms.get(name=full_vm.name).id + vm_id = appliance.rest_api.collections.vms.get(name=create_vm.name).id # get initial disks for later comparison - initial_disks = [disk.filename for disk in full_vm.configuration.disks] + initial_disks = [disk.filename for disk in create_vm.configuration.disks] # add a disk to VM add_data = [ @@ -612,18 +600,18 @@ def test_vm_disk_reconfig_via_rest(appliance, full_vm): # assert the new disk was added assert wait_for( - lambda: full_vm.configuration.num_disks > len(initial_disks), - fail_func=full_vm.refresh_relationships, + lambda: create_vm.configuration.num_disks > len(initial_disks), + fail_func=create_vm.refresh_relationships, delay=5, timeout=200, ) # Disk GUID is displayed instead of disk name in the disks table for a rhev VM, and passing # disk GUID to the delete method results in failure, so skip this part until the BZ is fixed. 
- if not (BZ(1691635).blocks and full_vm.provider.one_of(RHEVMProvider)): + if not (BZ(1691635).blocks and create_vm.provider.one_of(RHEVMProvider)): # there will always be 2 disks after the disk has been added - disks_present = [disk.filename for disk in full_vm.configuration.disks] + disks_present = [disk.filename for disk in create_vm.configuration.disks] disk_added = list(set(disks_present) - set(initial_disks))[0] # remove the newly added disk from VM @@ -633,8 +621,8 @@ def test_vm_disk_reconfig_via_rest(appliance, full_vm): # assert the disk was removed try: wait_for( - lambda: full_vm.configuration.num_disks == len(initial_disks), - fail_func=full_vm.refresh_relationships, + lambda: create_vm.configuration.num_disks == len(initial_disks), + fail_func=create_vm.refresh_relationships, delay=5, timeout=200, ) @@ -642,7 +630,7 @@ def test_vm_disk_reconfig_via_rest(appliance, full_vm): assert ( False ), "Number of disks expected was {expected}, found {actual}".format( - expected=len(initial_disks), actual=full_vm.configuration.num_disks + expected=len(initial_disks), actual=create_vm.configuration.num_disks )
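Usage sketch (not part of the commit): a minimal illustration of how a test module consumes the shared create_vm fixture after this change. The plain usage relies on whatever default template cfme/fixtures/vm.py resolves when the fixture is not parametrized; the 'full_template' parametrization mirrors the indirect usage this patch adds in test_vm_power_control.py and test_vm_reconfigure.py. The test names below are hypothetical.

    import pytest


    def test_example_default_template(create_vm, soft_assert):
        """Provisions a VM from the fixture's default template and checks power state."""
        soft_assert(create_vm.mgmt.is_running, "vm not running")


    @pytest.mark.parametrize('create_vm', ['full_template'], indirect=True)
    def test_example_full_template(create_vm):
        """Requests a VM built from the provider's full template instead."""
        assert create_vm.exists_on_provider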