From e059bc7cc5da6f447fe3c2842194c02067e4b33a Mon Sep 17 00:00:00 2001 From: Raman Gupta <7243222+raman325@users.noreply.github.com> Date: Wed, 13 Jan 2021 11:17:25 -0600 Subject: [PATCH] Add example tests (#50) --- .github/workflows/pull.yml | 26 ++++- .github/workflows/push.yml | 26 ++++- .gitignore | 5 +- .vscode/settings.json | 5 +- README.md | 8 ++ custom_components/__init__.py | 1 + .../integration_blueprint/api.py | 2 +- requirements_dev.txt | 1 + requirements_test.txt | 2 + setup.cfg | 2 +- tests/README.md | 24 ++++ tests/__init__.py | 1 + tests/conftest.py | 56 +++++++++ tests/const.py | 5 + tests/test_api.py | 86 ++++++++++++++ tests/test_config_flow.py | 110 ++++++++++++++++++ tests/test_init.py | 56 +++++++++ tests/test_switch.py | 44 +++++++ 18 files changed, 454 insertions(+), 6 deletions(-) create mode 100644 custom_components/__init__.py create mode 100644 requirements_dev.txt create mode 100644 requirements_test.txt create mode 100644 tests/README.md create mode 100644 tests/__init__.py create mode 100644 tests/conftest.py create mode 100644 tests/const.py create mode 100644 tests/test_api.py create mode 100644 tests/test_config_flow.py create mode 100644 tests/test_init.py create mode 100644 tests/test_switch.py diff --git a/.github/workflows/pull.yml b/.github/workflows/pull.yml index dcfb832..d895c86 100644 --- a/.github/workflows/pull.yml +++ b/.github/workflows/pull.yml @@ -28,4 +28,28 @@ jobs: with: python-version: "3.x" - run: python3 -m pip install black - - run: black . \ No newline at end of file + - run: black . 
+ + tests: + runs-on: "ubuntu-latest" + name: Run tests + steps: + - name: Check out code from GitHub + uses: "actions/checkout@v2" + - name: Setup Python + uses: "actions/setup-python@v1" + with: + python-version: "3.8" + - name: Install requirements + run: python3 -m pip install -r requirements_test.txt + - name: Run tests + run: | + pytest \ + -qq \ + --timeout=9 \ + --durations=10 \ + -n auto \ + --cov custom_components.integration_blueprint \ + -o console_output_style=count \ + -p no:sugar \ + tests diff --git a/.github/workflows/push.yml b/.github/workflows/push.yml index b3c4461..d0ff7bf 100644 --- a/.github/workflows/push.yml +++ b/.github/workflows/push.yml @@ -31,4 +31,28 @@ jobs: with: python-version: "3.x" - run: python3 -m pip install black - - run: black . \ No newline at end of file + - run: black . + + tests: + runs-on: "ubuntu-latest" + name: Run tests + steps: + - name: Check out code from GitHub + uses: "actions/checkout@v2" + - name: Setup Python + uses: "actions/setup-python@v1" + with: + python-version: "3.8" + - name: Install requirements + run: python3 -m pip install -r requirements_test.txt + - name: Run tests + run: | + pytest \ + -qq \ + --timeout=9 \ + --durations=10 \ + -n auto \ + --cov custom_components.integration_blueprint \ + -o console_output_style=count \ + -p no:sugar \ + tests \ No newline at end of file diff --git a/.gitignore b/.gitignore index bbbf65d..13b6f67 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,5 @@ __pycache__ -pythonenv* \ No newline at end of file +pythonenv* +venv +.venv +.coverage diff --git a/.vscode/settings.json b/.vscode/settings.json index 3c2faa3..a3d535d 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,5 +1,8 @@ { "python.linting.pylintEnabled": true, "python.linting.enabled": true, - "python.pythonPath": "/usr/local/bin/python" + "python.pythonPath": "/usr/local/bin/python", + "files.associations": { + "*.yaml": "home-assistant" + } } \ No newline at end of file diff --git 
a/README.md b/README.md index d99fc8f..d3d4fee 100644 --- a/README.md +++ b/README.md @@ -34,12 +34,20 @@ File | Purpose `custom_components/integration_blueprint/manifest.json` | A [manifest file](https://developers.home-assistant.io/docs/en/creating_integration_manifest.html) for Home Assistant. `custom_components/integration_blueprint/sensor.py` | Sensor platform for the integration. `custom_components/integration_blueprint/switch.py` | Switch sensor platform for the integration. +`tests/__init__.py` | Makes the `tests` folder a module. +`tests/conftest.py` | Global [fixtures](https://docs.pytest.org/en/stable/fixture.html) used in tests to [patch](https://docs.python.org/3/library/unittest.mock.html#unittest.mock.patch) functions. +`tests/test_api.py` | Tests for `custom_components/integration_blueprint/api.py`. +`tests/test_config_flow.py` | Tests for `custom_components/integration_blueprint/config_flow.py`. +`tests/test_init.py` | Tests for `custom_components/integration_blueprint/__init__.py`. +`tests/test_switch.py` | Tests for `custom_components/integration_blueprint/switch.py`. `CONTRIBUTING.md` | Guidelines on how to contribute. `example.png` | Screenshot that demonstrate how it might look in the UI. `info.md` | An example on a info file (used by [hacs][hacs]). `LICENSE` | The license file for the project. `README.md` | The file you are reading now, should contain info about the integration, installation and configuration instructions. `requirements.txt` | Python packages used by this integration. +`requirements_dev.txt` | Python packages used to provide [IntelliSense](https://code.visualstudio.com/docs/editor/intellisense)/code hints during development of this integration, typically includes packages in `requirements.txt` but may include additional packages +`requirements_test.txt` | Python packages required to run the tests for this integration, typically includes packages in `requirements_dev.txt` but may include additional packages ## How? 
diff --git a/custom_components/__init__.py b/custom_components/__init__.py new file mode 100644 index 0000000..f55f54d --- /dev/null +++ b/custom_components/__init__.py @@ -0,0 +1 @@ +"""Custom components module.""" diff --git a/custom_components/integration_blueprint/api.py b/custom_components/integration_blueprint/api.py index b266c56..fa9d7c9 100644 --- a/custom_components/integration_blueprint/api.py +++ b/custom_components/integration_blueprint/api.py @@ -72,4 +72,4 @@ async def api_wrapper( exception, ) except Exception as exception: # pylint: disable=broad-except - _LOGGER.error("Something really wrong happend! - %s", exception) + _LOGGER.error("Something really wrong happened! - %s", exception) diff --git a/requirements_dev.txt b/requirements_dev.txt new file mode 100644 index 0000000..7d78f01 --- /dev/null +++ b/requirements_dev.txt @@ -0,0 +1 @@ +homeassistant diff --git a/requirements_test.txt b/requirements_test.txt new file mode 100644 index 0000000..a39a876 --- /dev/null +++ b/requirements_test.txt @@ -0,0 +1,2 @@ +-r requirements_dev.txt +pytest-homeassistant-custom-component==0.1.0 diff --git a/setup.cfg b/setup.cfg index 6e3c410..4ee3655 100644 --- a/setup.cfg +++ b/setup.cfg @@ -31,5 +31,5 @@ not_skip = __init__.py force_sort_within_sections = true sections = FUTURE,STDLIB,INBETWEENS,THIRDPARTY,FIRSTPARTY,LOCALFOLDER default_section = THIRDPARTY -known_first_party = custom_components.integration_blueprint +known_first_party = custom_components.integration_blueprint, tests combine_as_imports = true diff --git a/tests/README.md b/tests/README.md new file mode 100644 index 0000000..90017d4 --- /dev/null +++ b/tests/README.md @@ -0,0 +1,24 @@ +# Why? + +While tests aren't required to publish a custom component for Home Assistant, they will generally make development easier because good tests will expose when changes you want to make to the component logic will break expected functionality. 
Home Assistant uses [`pytest`](https://docs.pytest.org/en/latest/) for its tests, and the tests that have been included are modeled after tests that are written for core Home Assistant integrations. These tests pass with 100% coverage (unless something has changed ;) ) and have comments to help you understand the purpose of different parts of the test. + +# Getting Started + +To begin, it is recommended to create a virtual environment to install dependencies: +```bash +python3 -m venv venv +source venv/bin/activate +``` + +You can then install the dependencies that will allow you to run tests: +`pip3 install -r requirements_test.txt` + +This will install `homeassistant`, `pytest`, and `pytest-homeassistant-custom-component`, a plugin which allows you to leverage helpers that are available in Home Assistant for core integration tests. + +# Useful commands + +Command | Description +------- | ----------- +`pytest tests/` | This will run all tests in `tests/` and tell you how many passed/failed +`pytest --durations=10 --cov-report term-missing --cov=custom_components.integration_blueprint tests` | This tells `pytest` that your target module to test is `custom_components.integration_blueprint` so that it can give you a [code coverage](https://en.wikipedia.org/wiki/Code_coverage) summary, including % of code that was executed and the line numbers of missed executions. 
+`pytest tests/test_init.py -k test_setup_unload_and_reload_entry` | Runs the `test_setup_unload_and_reload_entry` test function located in `tests/test_init.py` diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..0ba5e33 --- /dev/null +++ b/tests/__init__.py @@ -0,0 +1 @@ +"""Tests for integration_blueprint integration.""" diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..8fb21db --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,56 @@ +"""Global fixtures for integration_blueprint integration.""" +# Fixtures allow you to replace functions with a Mock object. You can perform +# many options via the Mock to reflect a particular behavior from the original +# function that you want to see without going through the function's actual logic. +# Fixtures can either be passed into tests as parameters, or if autouse=True, they +# will automatically be used across all tests. +# +# Fixtures that are defined in conftest.py are available across all tests. You can also +# define fixtures within a particular test file to scope them locally. +# +# pytest_homeassistant_custom_component provides some fixtures that are provided by +# Home Assistant core. You can find those fixture definitions here: +# https://github.com/MatthewFlamm/pytest-homeassistant-custom-component/blob/master/pytest_homeassistant_custom_component/common.py +# +# See here for more info: https://docs.pytest.org/en/latest/fixture.html (note that +# pytest includes fixtures OOB which you can use as defined on this page) +from unittest.mock import patch + +import pytest + +pytest_plugins = "pytest_homeassistant_custom_component" + + +# This fixture is used to prevent HomeAssistant from attempting to create and dismiss persistent +# notifications. These calls would fail without this fixture since the persistent_notification +# integration is never loaded during a test. 
+@pytest.fixture(name="skip_notifications", autouse=True) +def skip_notifications_fixture(): + """Skip notification calls.""" + with patch("homeassistant.components.persistent_notification.async_create"), patch( + "homeassistant.components.persistent_notification.async_dismiss" + ): + yield + + +# This fixture, when used, will result in calls to async_get_data to return None. To have the call +# return a value, we would add the `return_value=` parameter to the patch call. +@pytest.fixture(name="bypass_get_data") +def bypass_get_data_fixture(): + """Skip calls to get data from API.""" + with patch( + "custom_components.integration_blueprint.IntegrationBlueprintApiClient.async_get_data" + ): + yield + + +# In this fixture, we are forcing calls to async_get_data to raise an Exception. This is useful +# for exception handling. +@pytest.fixture(name="error_on_get_data") +def error_get_data_fixture(): + """Simulate error when retrieving data from API.""" + with patch( + "custom_components.integration_blueprint.IntegrationBlueprintApiClient.async_get_data", + side_effect=Exception, + ): + yield diff --git a/tests/const.py b/tests/const.py new file mode 100644 index 0000000..83c523f --- /dev/null +++ b/tests/const.py @@ -0,0 +1,5 @@ +"""Constants for integration_blueprint tests.""" +from custom_components.integration_blueprint.const import CONF_PASSWORD, CONF_USERNAME + +# Mock config data to be used across multiple tests +MOCK_CONFIG = {CONF_USERNAME: "test_username", CONF_PASSWORD: "test_password"} diff --git a/tests/test_api.py b/tests/test_api.py new file mode 100644 index 0000000..65ab7f3 --- /dev/null +++ b/tests/test_api.py @@ -0,0 +1,86 @@ +"""Tests for integration_blueprint api.""" +import asyncio + +import aiohttp +from homeassistant.helpers.aiohttp_client import async_get_clientsession + +from custom_components.integration_blueprint.api import IntegrationBlueprintApiClient + + +async def test_api(hass, aioclient_mock, caplog): + """Test API calls.""" + + # To 
test the api submodule, we first create an instance of our API client + api = IntegrationBlueprintApiClient("test", "test", async_get_clientsession(hass)) + + # Use aioclient_mock which is provided by `pytest_homeassistant_custom_components` + # to mock responses to aiohttp requests. In this case we are telling the mock to + # return {"test": "test"} when a `GET` call is made to the specified URL. We then + # call `async_get_data` which will make that `GET` request. + aioclient_mock.get( + "https://jsonplaceholder.typicode.com/posts/1", json={"test": "test"} + ) + assert await api.async_get_data() == {"test": "test"} + + # We do the same for `async_set_title`. Note the difference in the mock call + # between the previous step and this one. We use `patch` here instead of `get` + # because we know that `async_set_title` calls `api_wrapper` with `patch` as the + # first parameter + aioclient_mock.patch("https://jsonplaceholder.typicode.com/posts/1") + assert await api.async_set_title("test") is None + + # In order to get 100% coverage, we need to test `api_wrapper` to test the code + # that isn't already called by `async_get_data` and `async_set_title`. Because the + # only logic that lives inside `api_wrapper` that is not being handled by a third + # party library (aiohttp) is the exception handling, we also want to simulate + # raising the exceptions to ensure that the function handles them as expected. + # The caplog fixture allows access to log messages in tests. 
This is particularly + # useful during exception handling testing since often the only action as part of + # exception handling is a logging statement + caplog.clear() + aioclient_mock.put( + "https://jsonplaceholder.typicode.com/posts/1", exc=asyncio.TimeoutError + ) + assert ( + await api.api_wrapper("put", "https://jsonplaceholder.typicode.com/posts/1") + is None + ) + assert ( + len(caplog.record_tuples) == 1 + and "Timeout error fetching information from" in caplog.record_tuples[0][2] + ) + + caplog.clear() + aioclient_mock.post( + "https://jsonplaceholder.typicode.com/posts/1", exc=aiohttp.ClientError + ) + assert ( + await api.api_wrapper("post", "https://jsonplaceholder.typicode.com/posts/1") + is None + ) + assert ( + len(caplog.record_tuples) == 1 + and "Error fetching information from" in caplog.record_tuples[0][2] + ) + + caplog.clear() + aioclient_mock.post("https://jsonplaceholder.typicode.com/posts/2", exc=Exception) + assert ( + await api.api_wrapper("post", "https://jsonplaceholder.typicode.com/posts/2") + is None + ) + assert ( + len(caplog.record_tuples) == 1 + and "Something really wrong happened!" 
in caplog.record_tuples[0][2] ) + + caplog.clear() + aioclient_mock.post("https://jsonplaceholder.typicode.com/posts/3", exc=TypeError) + assert ( + await api.api_wrapper("post", "https://jsonplaceholder.typicode.com/posts/3") + is None + ) + assert ( + len(caplog.record_tuples) == 1 + and "Error parsing information from" in caplog.record_tuples[0][2] + ) diff --git a/tests/test_config_flow.py b/tests/test_config_flow.py new file mode 100644 index 0000000..326eb16 --- /dev/null +++ b/tests/test_config_flow.py @@ -0,0 +1,110 @@ +"""Test integration_blueprint config flow.""" +from unittest.mock import patch + +from homeassistant import config_entries, data_entry_flow +import pytest +from pytest_homeassistant_custom_component.common import MockConfigEntry + +from custom_components.integration_blueprint.const import ( + BINARY_SENSOR, + DOMAIN, + PLATFORMS, + SENSOR, + SWITCH, +) + +from .const import MOCK_CONFIG + + +# This fixture bypasses the actual setup of the integration +# since we only want to test the config flow. We test the +# actual functionality of the integration in other test modules. +@pytest.fixture(autouse=True) +def bypass_setup_fixture(): + """Prevent setup.""" + with patch( + "custom_components.integration_blueprint.async_setup", + return_value=True, + ), patch( + "custom_components.integration_blueprint.async_setup_entry", + return_value=True, + ): + yield + + +# Here we simulate a successful config flow from the backend. +# Note that we use the `bypass_get_data` fixture here because +# we want the config flow validation to succeed during the test. 
+async def test_successful_config_flow(hass, bypass_get_data): + """Test a successful config flow.""" + # Initialize a config flow + result = await hass.config_entries.flow.async_init( + DOMAIN, context={"source": config_entries.SOURCE_USER} + ) + + # Check that the config flow shows the user form as the first step + assert result["type"] == data_entry_flow.RESULT_TYPE_FORM + assert result["step_id"] == "user" + + # If a user were to enter `test_username` for username and `test_password` + # for password, it would result in this function call + result = await hass.config_entries.flow.async_configure( + result["flow_id"], user_input=MOCK_CONFIG + ) + + # Check that the config flow is complete and a new entry is created with + # the input data + assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY + assert result["title"] == "test_username" + assert result["data"] == MOCK_CONFIG + assert result["result"] + + +# In this case, we want to simulate a failure during the config flow. +# We use the `error_on_get_data` mock instead of `bypass_get_data` +# (note the function parameters) to raise an Exception during +# validation of the input config. +async def test_failed_config_flow(hass, error_on_get_data): + """Test a failed config flow due to credential validation failure.""" + result = await hass.config_entries.flow.async_init( + DOMAIN, context={"source": config_entries.SOURCE_USER} + ) + + assert result["type"] == data_entry_flow.RESULT_TYPE_FORM + assert result["step_id"] == "user" + + result = await hass.config_entries.flow.async_configure( + result["flow_id"], user_input=MOCK_CONFIG + ) + + assert result["type"] == data_entry_flow.RESULT_TYPE_FORM + assert result["errors"] == {"base": "auth"} + + +# Our config flow also has an options flow, so we must test it as well. 
+async def test_options_flow(hass): + """Test an options flow.""" + # Create a new MockConfigEntry and add to HASS (we're bypassing config + # flow entirely) + entry = MockConfigEntry(domain=DOMAIN, data=MOCK_CONFIG, entry_id="test") + entry.add_to_hass(hass) + + # Initialize an options flow + result = await hass.config_entries.options.async_init(entry.entry_id) + + # Verify that the first options step is a user form + assert result["type"] == data_entry_flow.RESULT_TYPE_FORM + assert result["step_id"] == "user" + + # Enter some fake data into the form + result = await hass.config_entries.options.async_configure( + result["flow_id"], + user_input={platform: platform != SENSOR for platform in PLATFORMS}, + ) + + # Verify that the flow finishes + assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY + assert result["title"] == "test_username" + + # Verify that the options were updated + assert entry.options == {BINARY_SENSOR: True, SENSOR: False, SWITCH: True} diff --git a/tests/test_init.py b/tests/test_init.py new file mode 100644 index 0000000..4d46ec4 --- /dev/null +++ b/tests/test_init.py @@ -0,0 +1,56 @@ +"""Test integration_blueprint setup process.""" +from homeassistant.exceptions import ConfigEntryNotReady +import pytest +from pytest_homeassistant_custom_component.common import MockConfigEntry + +from custom_components.integration_blueprint import ( + BlueprintDataUpdateCoordinator, + async_reload_entry, + async_setup_entry, + async_unload_entry, +) +from custom_components.integration_blueprint.const import DOMAIN + +from .const import MOCK_CONFIG + + +# We can pass fixtures as defined in conftest.py to tell pytest to use the fixture +# for a given test. We can also leverage fixtures and mocks that are available in +# Home Assistant using the pytest_homeassistant_custom_component plugin. +# Assertions allow you to verify that the return value of whatever is on the left +# side of the assertion matches with the right side. 
+async def test_setup_unload_and_reload_entry(hass, bypass_get_data): + """Test entry setup and unload.""" + # Create a mock entry so we don't have to go through config flow + config_entry = MockConfigEntry(domain=DOMAIN, data=MOCK_CONFIG, entry_id="test") + + # Set up the entry and assert that the values set during setup are where we expect + # them to be. Because we have patched the BlueprintDataUpdateCoordinator.async_get_data + # call, no code from custom_components/integration_blueprint/api.py actually runs. + assert await async_setup_entry(hass, config_entry) + assert DOMAIN in hass.data and config_entry.entry_id in hass.data[DOMAIN] + assert ( + type(hass.data[DOMAIN][config_entry.entry_id]) == BlueprintDataUpdateCoordinator + ) + + # Reload the entry and assert that the data from above is still there + assert await async_reload_entry(hass, config_entry) is None + assert DOMAIN in hass.data and config_entry.entry_id in hass.data[DOMAIN] + assert ( + type(hass.data[DOMAIN][config_entry.entry_id]) == BlueprintDataUpdateCoordinator + ) + + # Unload the entry and verify that the data has been removed + assert await async_unload_entry(hass, config_entry) + assert config_entry.entry_id not in hass.data[DOMAIN] + + +async def test_setup_entry_exception(hass, error_on_get_data): + """Test ConfigEntryNotReady when API raises an exception during entry setup.""" + config_entry = MockConfigEntry(domain=DOMAIN, data=MOCK_CONFIG, entry_id="test") + + # In this case we are testing the condition where async_setup_entry raises + # ConfigEntryNotReady using the `error_on_get_data` fixture which simulates + # an error. 
+ with pytest.raises(ConfigEntryNotReady): + assert await async_setup_entry(hass, config_entry) diff --git a/tests/test_switch.py b/tests/test_switch.py new file mode 100644 index 0000000..a48d58e --- /dev/null +++ b/tests/test_switch.py @@ -0,0 +1,44 @@ +"""Test integration_blueprint switch.""" +from unittest.mock import call, patch + +from homeassistant.components.switch import SERVICE_TURN_OFF, SERVICE_TURN_ON +from homeassistant.const import ATTR_ENTITY_ID +from pytest_homeassistant_custom_component.common import MockConfigEntry + +from custom_components.integration_blueprint import async_setup_entry +from custom_components.integration_blueprint.const import DEFAULT_NAME, DOMAIN, SWITCH + +from .const import MOCK_CONFIG + + +async def test_switch_services(hass): + """Test switch services.""" + # Create a mock entry so we don't have to go through config flow + config_entry = MockConfigEntry(domain=DOMAIN, data=MOCK_CONFIG, entry_id="test") + assert await async_setup_entry(hass, config_entry) + await hass.async_block_till_done() + + # Functions/objects can be patched directly in test code as well and can be used to test + # additional things, like whether a function was called or what arguments it was called with + with patch( + "custom_components.integration_blueprint.IntegrationBlueprintApiClient.async_set_title" + ) as title_func: + await hass.services.async_call( + SWITCH, + SERVICE_TURN_OFF, + service_data={ATTR_ENTITY_ID: f"{SWITCH}.{DEFAULT_NAME}_{SWITCH}"}, + blocking=True, + ) + assert title_func.called + assert title_func.call_args == call("foo") + + title_func.reset_mock() + + await hass.services.async_call( + SWITCH, + SERVICE_TURN_ON, + service_data={ATTR_ENTITY_ID: f"{SWITCH}.{DEFAULT_NAME}_{SWITCH}"}, + blocking=True, + ) + assert title_func.called + assert title_func.call_args == call("bar")