Fix unit tests to run consistently in CI and locally #815

Merged · 13 commits · Sep 14, 2021
7 changes: 7 additions & 0 deletions pytest.ini
@@ -2,3 +2,10 @@
markers =
serial: tests that cannot be reliably run with pytest multiprocessing
timeout: used with pytest-timeout
addopts =
-r a
--color yes
--showlocals
--verbose
--numprocesses auto
--durations 10
6 changes: 3 additions & 3 deletions test/integration/conftest.py
@@ -10,14 +10,14 @@


@pytest.fixture(scope='function')
def rc(tmpdir):
rc = RunnerConfig(str(tmpdir))
def rc(tmp_path):
rc = RunnerConfig(str(tmp_path))
rc.suppress_ansible_output = True
rc.expect_passwords = {
pexpect.TIMEOUT: None,
pexpect.EOF: None
}
rc.cwd = str(tmpdir)
rc.cwd = str(tmp_path)
rc.env = {}
rc.job_timeout = 10
rc.idle_timeout = 0
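
This fixture change is the pattern repeated throughout the PR: the legacy tmpdir fixture yields a py.path.local, while tmp_path yields a standard pathlib.Path, so directory creation becomes explicit and str() conversions happen only where an API needs a string. A minimal sketch of the difference (hypothetical tests, not part of this diff):

# tmpdir is a py.path.local: mkdir() both creates and returns the new path
def test_with_tmpdir(tmpdir):
    sub = tmpdir.mkdir('foo')
    assert sub.check(dir=True)

# tmp_path is a pathlib.Path: build the path, then create it
def test_with_tmp_path(tmp_path):
    sub = tmp_path / 'foo'
    sub.mkdir()
    assert sub.is_dir()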
13 changes: 7 additions & 6 deletions test/integration/test_display_callback.py
@@ -15,8 +15,9 @@


@pytest.fixture()
def executor(tmpdir, request, is_pre_ansible28):
private_data_dir = six.text_type(tmpdir.mkdir('foo'))
def executor(tmp_path, request, is_pre_ansible28):
private_data_dir = tmp_path / 'foo'
private_data_dir.mkdir()

playbooks = request.node.callspec.params.get('playbook')
playbook = list(playbooks.values())[0]
@@ -323,7 +324,7 @@ def test_module_level_no_log(executor, playbook, skipif_pre_ansible28):
assert 'SENSITIVE' not in json.dumps(list(executor.events))


def test_output_when_given_invalid_playbook(tmpdir):
def test_output_when_given_invalid_playbook(tmp_path):
# As shown in the following issue:
#
# https://github.com/ansible/ansible-runner/issues/29
@@ -334,7 +335,7 @@ def test_output_when_given_invalid_playbook(tmpdir):
# https://github.com/ansible/ansible-runner/pull/34
#
# But no test validated it. This does that.
private_data_dir = str(tmpdir)
private_data_dir = str(tmp_path)
executor = init_runner(
private_data_dir=private_data_dir,
inventory="localhost ansible_connection=local",
@@ -348,7 +349,7 @@ def test_output_when_given_invalid_playbook(tmpdir):
assert "could not be found" in stdout


def test_output_when_given_non_playbook_script(tmpdir):
def test_output_when_given_non_playbook_script(tmp_path):
# As shown in the following pull request:
#
# https://github.com/ansible/ansible-runner/pull/256
@@ -360,7 +361,7 @@ def test_output_when_given_non_playbook_script(tmpdir):
# this is a retro-active test based on the sample repo provided in the PR:
#
# https://github.com/AlanCoding/ansible-runner-examples/tree/master/non_playbook/sleep_with_writes
private_data_dir = str(tmpdir)
private_data_dir = str(tmp_path)
with open(os.path.join(private_data_dir, "args"), 'w') as args_file:
args_file.write("bash sleep_and_write.sh\n")
with open(os.path.join(private_data_dir, "sleep_and_write.sh"), 'w') as script_file:
62 changes: 30 additions & 32 deletions test/integration/test_main.py
@@ -99,25 +99,22 @@ def test_module_run():
shutil.rmtree('./ping')


def test_module_run_debug():
try:
rc = main(['run', '-m', 'ping',
'--hosts', 'localhost',
'--debug',
'ping'])
assert os.path.exists('./ping')
assert os.path.exists('./ping/artifacts')
assert rc == 0
finally:
if os.path.exists('./ping'):
shutil.rmtree('./ping')
def test_module_run_debug(tmp_path):
output = tmp_path / 'ping'
rc = main(['run', '-m', 'ping',
'--hosts', 'localhost',
'--debug',
str(output)])

assert output.exists()
assert output.joinpath('artifacts').exists()
assert rc == 0

def test_module_run_clean():
with temp_directory() as temp_dir:
rc = main(['run', '-m', 'ping',
'--hosts', 'localhost',
temp_dir])

def test_module_run_clean(tmp_path):
rc = main(['run', '-m', 'ping',
'--hosts', 'localhost',
str(tmp_path)])
assert rc == 0


@@ -138,28 +135,29 @@ def test_role_run_abs():
assert rc == 0


def test_role_logfile(skipif_pre_ansible28, clear_integration_artifacts):
# FIXME: This test interferes with test_role_logfile_abs. Marking it as serial so it is executed
# in a separate test run.
@pytest.mark.serial
def test_role_logfile(skipif_pre_ansible28, clear_integration_artifacts, tmp_path):
logfile = tmp_path.joinpath('test_role_logfile')
rc = main(['run', '-r', 'benthomasson.hello_role',
'--hosts', 'localhost',
'--roles-path', 'test/integration/project/roles',
'--logfile', 'test_role_logfile',
'--logfile', str(logfile),
'test/integration'])
assert os.path.exists('test_role_logfile'), rc
assert logfile.exists()
assert rc == 0
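
The serial marker applied above is registered in the pytest.ini hunk earlier in this diff; it flags tests that must not run inside the parallel pytest-xdist workers. A sketch of the pattern (hypothetical test; the invocations in the comment are an assumption about how a CI pipeline might split the passes, not taken from this PR):

import pytest

# Illustrative only: 'serial' tests can be excluded from the parallel pass and
# run in a second, single-process pass, e.g.
#   pytest -m "not serial"          (parallel, via --numprocesses auto)
#   pytest -m serial -p no:xdist    (serial pass, xdist disabled)
@pytest.mark.serial
def test_touches_shared_logfile(tmp_path):
    logfile = tmp_path / 'run.log'
    logfile.write_text('ok\n')
    assert logfile.read_text() == 'ok\n'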


def test_role_logfile_abs():
try:
with temp_directory() as temp_dir:
rc = main(['run', '-r', 'benthomasson.hello_role',
'--hosts', 'localhost',
'--roles-path', os.path.join(HERE, 'project/roles'),
'--logfile', 'new_logfile',
temp_dir])
assert os.path.exists('new_logfile')
assert rc == 0
finally:
ensure_removed("new_logfile")
def test_role_logfile_abs(tmp_path):
rc = main(['run', '-r', 'benthomasson.hello_role',
'--hosts', 'localhost',
'--roles-path', os.path.join(HERE, 'project/roles'),
'--logfile', tmp_path.joinpath('new_logfile').as_posix(),
str(tmp_path)])

assert tmp_path.joinpath('new_logfile').exists()
assert rc == 0


def test_role_bad_project_dir():
39 changes: 25 additions & 14 deletions test/integration/test_transmit_worker_process.py
@@ -53,10 +53,14 @@ def check_artifacts(self, process_dir, job_type):
assert '"ansible_facts"' in stdout

@pytest.mark.parametrize("job_type", ['run', 'adhoc'])
def test_remote_job_interface(self, tmpdir, test_data_dir, job_type):
def test_remote_job_interface(self, tmp_path, test_data_dir, job_type):
transmit_dir = os.path.join(test_data_dir, 'debug')
worker_dir = str(tmpdir.mkdir('for_worker'))
process_dir = str(tmpdir.mkdir('for_process'))
worker_dir = tmp_path / 'for_worker'
worker_dir.mkdir()

process_dir = tmp_path / 'for_process'
process_dir.mkdir()

job_kwargs = self.get_job_kwargs(job_type)

outgoing_buffer = tempfile.NamedTemporaryFile()
@@ -90,17 +94,21 @@ def test_remote_job_interface(self, tmpdir, test_data_dir, job_type):
processor = Processor(_input=incoming_buffer, private_data_dir=process_dir)
processor.run()

self.check_artifacts(process_dir, job_type)
self.check_artifacts(str(process_dir), job_type)

@pytest.mark.parametrize("job_type", ['run', 'adhoc'])
def test_remote_job_by_sockets(self, tmpdir, test_data_dir, container_runtime_installed, job_type):
def test_remote_job_by_sockets(self, tmp_path, test_data_dir, container_runtime_installed, job_type):
"""This test case is intended to be close to how the AWX use case works
the process interacts with receptorctl with sockets
sockets are used here, but worker is manually called instead of invoked by receptor
"""
transmit_dir = os.path.join(test_data_dir, 'debug')
worker_dir = str(tmpdir.mkdir('for_worker'))
process_dir = str(tmpdir.mkdir('for_process'))
worker_dir = tmp_path / 'for_worker'
worker_dir.mkdir()

process_dir = tmp_path / 'for_process'
process_dir.mkdir()

job_kwargs = self.get_job_kwargs(job_type)

def transmit_method(transmit_sockfile_write):
@@ -155,7 +163,7 @@ def process_method(results_sockfile_read):

assert set(os.listdir(worker_dir)) == {'artifacts', 'inventory', 'project', 'env'}

self.check_artifacts(process_dir, job_type)
self.check_artifacts(str(process_dir), job_type)
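
The socket plumbing that test_remote_job_by_sockets relies on boils down to a socketpair whose two ends are wrapped as file objects, so the transmit/worker/process code can treat them like ordinary streams. A minimal illustrative sketch (standalone, not the test's actual code):

import socket

def make_stream_pair():
    # socketpair() gives two connected sockets; makefile() wraps each end as a
    # file-like object for code written against streams.
    read_sock, write_sock = socket.socketpair()
    return read_sock.makefile('rb'), write_sock.makefile('wb')

reader, writer = make_stream_pair()
writer.write(b'{"eof": true}\n')
writer.flush()
assert reader.readline() == b'{"eof": true}\n'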


@pytest.fixture(scope='session')
@@ -207,8 +215,9 @@ def test_missing_private_dir_transmit(tmpdir):
assert "private_data_dir path is either invalid or does not exist" in str(excinfo.value)


def test_garbage_private_dir_worker(tmpdir):
worker_dir = str(tmpdir.mkdir('for_worker'))
def test_garbage_private_dir_worker(tmp_path):
worker_dir = tmp_path / 'for_worker'
worker_dir.mkdir()
incoming_buffer = io.BytesIO(
b'{"kwargs": {"playbook": "debug.yml"}}\n{"zipfile": 5}\n\x01\x02\x03\x04\x05{"eof": true}\n')
outgoing_buffer = io.BytesIO()
@@ -224,8 +233,9 @@ def test_garbage_private_dir_worker(tmpdir):
assert b'"status": "error"' in sent


def test_unparsable_private_dir_worker(tmpdir):
worker_dir = str(tmpdir.mkdir('for_worker'))
def test_unparsable_private_dir_worker(tmp_path):
worker_dir = tmp_path / 'for_worker'
worker_dir.mkdir()
incoming_buffer = io.BytesIO(b'')
outgoing_buffer = io.BytesIO()

@@ -240,8 +250,9 @@ def test_unparsable_private_dir_worker(tmpdir):
assert b'"status": "error"' in sent


def test_unparsable_private_dir_processor(tmpdir):
process_dir = str(tmpdir.mkdir('for_process'))
def test_unparsable_private_dir_processor(tmp_path):
process_dir = tmp_path / 'for_process'
process_dir.mkdir()
incoming_buffer = io.BytesIO(b'')

processor = run(
1 change: 1 addition & 0 deletions test/requirements.txt
@@ -1,4 +1,5 @@
pytest
pytest-mock
pytest-xdist
mock
flake8
83 changes: 49 additions & 34 deletions test/unit/config/test__base.py
@@ -250,8 +250,8 @@ def test_wrap_args_with_ssh_agent_silent():
@patch('os.path.isdir', return_value=False)
@patch('os.path.exists', return_value=True)
@patch('os.makedirs', return_value=True)
def test_container_volume_mounting_with_Z(mock_isdir, mock_exists, mock_makedirs, tmpdir):
rc = BaseConfig(private_data_dir=str(tmpdir))
def test_container_volume_mounting_with_Z(mock_isdir, mock_exists, mock_makedirs, tmp_path):
rc = BaseConfig(private_data_dir=str(tmp_path))
os.path.isdir = Mock()
rc.container_volume_mounts = ['project_path:project_path:Z']
rc.container_name = 'foo'
@@ -275,45 +275,60 @@ def test_container_volume_mounting_with_Z(mock_isdir, mock_exists, mock_makedirs


@pytest.mark.parametrize('container_runtime', ['docker', 'podman'])
def test_containerization_settings(tmpdir, container_runtime):
with patch('ansible_runner.config._base.BaseConfig.containerized', new_callable=PropertyMock) as mock_containerized:
rc = BaseConfig(private_data_dir=tmpdir)
rc.ident = 'foo'
rc.cmdline_args = ['main.yaml', '-i', '/tmp/inventory']
rc.command = ['ansible-playbook'] + rc.cmdline_args
rc.process_isolation = True
rc.runner_mode = 'pexpect'
rc.process_isolation_executable = container_runtime
rc.container_image = 'my_container'
rc.container_volume_mounts = ['/host1:/container1', 'host2:/container2']
mock_containerized.return_value = True
rc.execution_mode = BaseExecutionMode.ANSIBLE_COMMANDS
rc._prepare_env()
rc._handle_command_wrap(rc.execution_mode, rc.cmdline_args)
def test_containerization_settings(tmp_path, container_runtime, mocker):
mocker.patch.dict('os.environ', {'HOME': str(tmp_path)}, clear=True)

(Review thread attached to the mocker.patch.dict() line above.)

Member: Have you considered using the built-in monkeypatch.setenv()?
https://docs.pytest.org/en/latest/how-to/monkeypatch.html

@samdoran (Contributor, author), Sep 13, 2021: I haven't yet figured out when to use monkeypatch vs. mocker.patch. I still have things to learn. :)

Member: Out of the 3 options outlined in pytest-dev/pytest#4576 (comment), I have seen a lot of projects converging on option (2), because Python 2 tends to be in the rear-view mirror by now. The "from unittest import mock" pattern already exists in the ansible-runner unit tests. However, the pytest-mock plugin you're adding here is more naturally aware of the pytest test scope, which means you don't have to indent everything in context managers. Thus, I raise no objections to it.

@samdoran:

> The "from unittest import mock" pattern already exists in the ansible-runner unit tests.

I have a stashed commit locally that removes all of that in favor of the pytest-mock plugin. It ended up being pretty huge and took me several hours to get working, which is why I didn't include those changes in this PR. I'll do that in (yet another) PR.

@samdoran: I had a good talk with @webknjaz about monkeypatch vs. mocker.patch, and I have a better idea of when to use each now. monkeypatch doesn't have the ability to clear the environment, but I'm not sure that is strictly necessary. I will try it out and see if it works.

Member: Technically, monkeypatch.setattr(os, 'environ', {'HOME': str(tmp_path)}) would probably emulate this. But I agree, it's usually unnecessary to clear all the other env vars. If you expect certain env vars not to be set, use monkeypatch.delenv() for that.

Member: I think that what Ronny points out in the very next comment after pytest-dev/pytest#4576 (comment) is even more important to take into account: #815 (comment). TL;DR: if you end up using any sort of mocking technique, you probably have an architectural problem. If you were to just use dependency injection, there would be exactly zero need for this.

@samdoran:

> If you end up using any sort of mocking technique, you probably have an architectural problem. If you were to just use dependency injection, there would be exactly zero need for this.

I absolutely agree with this. I am a huge fan of test-driven development for this reason, among others. Currently, I'm just trying to improve the state of the tests; eventually I hope to improve the code itself.

(End of review thread.)

tmp_path.joinpath('.ssh').mkdir()

mock_containerized = mocker.patch('ansible_runner.config._base.BaseConfig.containerized', new_callable=PropertyMock)
mock_containerized.return_value = True

rc = BaseConfig(private_data_dir=tmp_path)
rc.ident = 'foo'
rc.cmdline_args = ['main.yaml', '-i', '/tmp/inventory']
rc.command = ['ansible-playbook'] + rc.cmdline_args
rc.process_isolation = True
rc.runner_mode = 'pexpect'
rc.process_isolation_executable = container_runtime
rc.container_image = 'my_container'
rc.container_volume_mounts = ['/host1:/container1', 'host2:/container2']
rc.execution_mode = BaseExecutionMode.ANSIBLE_COMMANDS
rc._prepare_env()
rc._handle_command_wrap(rc.execution_mode, rc.cmdline_args)

extra_container_args = []
if container_runtime == 'podman':
extra_container_args = ['--quiet']
else:
extra_container_args = ['--user={os.getuid()}']
extra_container_args = [f'--user={os.getuid()}']

expected_command_start = [
container_runtime,
'run',
'--rm',
'--tty',
'--interactive',
'--workdir',
'/runner/project',
'-v', '{}/.ssh/:/home/runner/.ssh/'.format(str(tmp_path))
]

expected_command_start = [container_runtime, 'run', '--rm', '--tty', '--interactive', '--workdir', '/runner/project'] + \
['-v', '{}/.ssh/:/home/runner/.ssh/'.format(os.environ['HOME'])]
if container_runtime == 'podman':
expected_command_start += ['--group-add=root', '--ipc=host']
expected_command_start.extend(['--group-add=root', '--ipc=host'])

expected_command_start.extend([
'-v', '{}/artifacts/:/runner/artifacts/:Z'.format(rc.private_data_dir),
'-v', '{}/:/runner/:Z'.format(rc.private_data_dir),
'--env-file', '{}/env.list'.format(rc.artifact_dir),
])

expected_command_start.extend(extra_container_args)

expected_command_start += ['-v', '{}/artifacts/:/runner/artifacts/:Z'.format(rc.private_data_dir)] + \
['-v', '{}/:/runner/:Z'.format(rc.private_data_dir)] + \
['--env-file', '{}/env.list'.format(rc.artifact_dir)] + \
extra_container_args + \
['--name', 'ansible_runner_foo'] + \
['my_container', 'ansible-playbook', 'main.yaml', '-i', '/tmp/inventory']
expected_command_start.extend([
'--name', 'ansible_runner_foo',
'my_container', 'ansible-playbook', 'main.yaml', '-i', '/tmp/inventory',
])

for index, element in enumerate(expected_command_start):
if '--user=' in element:
assert '--user=' in rc.command[index]
else:
assert rc.command[index] == element
assert expected_command_start == rc.command
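
Two illustrative follow-ups to the review thread above; neither is code from this PR.

First, the monkeypatch.setenv() alternative the reviewer suggested. It scopes the environment change to the test and undoes it automatically, without clearing unrelated variables (a hypothetical rewrite of the setup; the delenv() line only shows how a variable that must be absent could be dropped):

def test_containerization_settings_sketch(tmp_path, monkeypatch):
    monkeypatch.setenv('HOME', str(tmp_path))
    # Hypothetical: only needed if the test requires this variable to be unset.
    monkeypatch.delenv('ANSIBLE_CONFIG', raising=False)
    tmp_path.joinpath('.ssh').mkdir()

Second, the dependency-injection point: if the config object accepted its environment as a constructor argument, no patching would be needed at all. BaseConfig does not currently take such an argument; this class exists purely to illustrate the idea:

import os

class EnvAwareConfig:
    def __init__(self, private_data_dir, environ=None):
        # The injected mapping defaults to the real environment.
        self.environ = dict(os.environ if environ is None else environ)
        self.home = self.environ.get('HOME', private_data_dir)

def test_home_is_injected(tmp_path):
    cfg = EnvAwareConfig(str(tmp_path), environ={'HOME': str(tmp_path)})
    assert cfg.home == str(tmp_path)  # no mocking or env patching required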


@pytest.mark.parametrize(
Expand All @@ -322,9 +337,9 @@ def test_containerization_settings(tmpdir, container_runtime):
('podman', '1')
)
)
def test_containerization_unsafe_write_setting(tmpdir, container_runtime, expected):
def test_containerization_unsafe_write_setting(tmp_path, container_runtime, expected):
with patch('ansible_runner.config._base.BaseConfig.containerized', new_callable=PropertyMock) as mock_containerized:
rc = BaseConfig(private_data_dir=tmpdir)
rc = BaseConfig(private_data_dir=tmp_path)
rc.ident = 'foo'
rc.cmdline_args = ['main.yaml', '-i', '/tmp/inventory']
rc.command = ['ansible-playbook'] + rc.cmdline_args