diff --git a/.flake8 b/.flake8
deleted file mode 100644
index 9d586868b2..0000000000
--- a/.flake8
+++ /dev/null
@@ -1,3 +0,0 @@
-[flake8]
-max-line-length = 100
-
diff --git a/.github/workflows/test-linux-windows.yml b/.github/workflows/test-linux-windows.yml
index da127e5382..5881b782a9 100644
--- a/.github/workflows/test-linux-windows.yml
+++ b/.github/workflows/test-linux-windows.yml
@@ -40,8 +40,8 @@ jobs:
       - name: Run tests
         run: pytest -rf test/
 
-      - name: check linting
+      - name: Check linting
         run: |
           # E203 and W503 don't work well with black
-          flake8 parsons/ test/ useful_resources/ --extend-ignore=E203,W503
+          flake8 parsons/ test/ useful_resources/
           black --check parsons/ test/ useful_resources/
diff --git a/.github/workflows/tests-mac.yml b/.github/workflows/tests-mac.yml
index 479ca7dd2b..caeda0d0aa 100644
--- a/.github/workflows/tests-mac.yml
+++ b/.github/workflows/tests-mac.yml
@@ -37,10 +37,10 @@ jobs:
           python -m pip install -r requirements-dev.txt
 
       - name: Run tests
-        run: TESTING=1 pytest -rf test/
+        run: pytest
 
-      - name: check linting
+      - name: Check linting
         run: |
           # E203 and W503 don't work well with black
-          flake8 parsons/ test/ useful_resources/ --extend-ignore=E203,W503
+          flake8 parsons/ test/ useful_resources/
           black --check parsons/ test/ useful_resources/
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 7376c5aca1..0c4ead752e 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,15 +1,13 @@
 repos:
   - repo: https://github.com/pycqa/flake8
-    rev: 6.1.0
+    rev: 7.0.0
     hooks:
       - id: flake8
-        language_version: python3
         args: [
-            '--extend-ignore=E203,W503',
-            '--max-line-length=100'
-        ]
+          '--extend-ignore=E203,W503',
+          '--max-line-length=100'
+        ]
   - repo: https://github.com/psf/black
-    rev: 22.3.0
+    rev: 24.2.0
    hooks:
      - id: black
-        language_version: python3
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 68c8866f0b..8c3a9f7f68 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -2,7 +2,10 @@ We're thrilled that you're thinking about contributing to Parsons! Welcome to ou
 
 You can find a detailed version of this guide [on our website](https://www.parsonsproject.org/pub/contributing-guide/).
 
-The best way to get involved is by joining our Slack. To join, email engineering@movementcooperative.org. In addition to all the great discussions that happen on our Slack, we also have virtual events including trainings, pairing sessions, social hangouts, discussions, and more. Every other Thursday afternoon we host 🎉 Parsons Parties 🎉 on Zoom where we work on contributions together.
+The best way to get involved is by joining our Slack. To join, email engineering@movementcooperative.org. In addition to
+all the great discussions that happen on our Slack, we also have virtual events including trainings, pairing sessions,
+social hangouts, discussions, and more. Every other Thursday afternoon we host 🎉 Parsons Parties 🎉 on Zoom where we work
+on contributions together.
 
 You can contribute by:
 
@@ -19,21 +22,31 @@ We encourage folks to review existing issues before starting a new issue.
 * If you have code snippets, but don’t have time to do the full write, please add to the issue!
 
 We use labels to help us classify issues. They include:
+
 * **bug** - something in Parsons isn’t working the way it should
 * **enhancement** - new feature or request (e.g. a new API connector)
 * **good first issue** - an issue that would be good for someone who is new to Parsons
 
 ## Contributing Code to Parsons
 
-Generally, code contributions to Parsons will be either enhancements or bug requests (or contributions of [sample code](#sample-code), discussed below). All changes to the repository are made [via pull requests](#submitting-a-pull-request).
+Generally, code contributions to Parsons will be either enhancements or bug requests (or contributions
+of [sample code](#sample-code), discussed below). All changes to the repository are
+made [via pull requests](#submitting-a-pull-request).
 
-If you would like to contribute code to Parsons, please review the issues in the repository and find one you would like to work on. If you are new to Parsons or to open source projects, look for issues with the [**good first issue**](https://github.com/move-coop/parsons/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) label. Once you have found your issue, please add a comment to the issue that lets others know that you are interested in working on it. If you're having trouble finding something to work on, please ask us for help on Slack.
+If you would like to contribute code to Parsons, please review the issues in the repository and find one you would like
+to work on. If you are new to Parsons or to open source projects, look for issues with the [**good first issue
+**](https://github.com/move-coop/parsons/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) label. Once you
+have found your issue, please add a comment to the issue that lets others know that you are interested in working on it.
+If you're having trouble finding something to work on, please ask us for help on Slack.
 
-The bulk of Parsons is made up of Connector classes, which are Python classes that help move data in and out of third party services. When you feel ready, you may want to contribute by [adding a new Connector class](https://move-coop.github.io/parsons/html/build_a_connector.html).
+The bulk of Parsons is made up of Connector classes, which are Python classes that help move data in and out of third
+party services. When you feel ready, you may want to contribute
+by [adding a new Connector class](https://move-coop.github.io/parsons/html/build_a_connector.html).
 
 ### Making Changes to Parsons
 
-To make code changes to Parsons, you'll need to set up your development environment, make your changes, and then submit a pull request.
+To make code changes to Parsons, you'll need to set up your development environment, make your changes, and then submit
+a pull request.
 
 To set up your development environment:
 
@@ -43,57 +56,70 @@ To set up your development environment:
 * Install the [dependencies](#installing-dependencies)
 * Check that everything's working by [running the unit tests](#unit-tests) and the [linter](#linting)
 
-Now it's time to make your changes. We suggest taking a quick look at our [coding conventions](#coding-conventions) - it'll make the review process easier down the line. In addition to any code changes, make sure to update the documentation and the unit tests if necessary. Not sure if your changes require test or documentation updates? Just ask in Slack or through a comment on the relevant issue. When you're done, make sure to run the [unit tests](#unit-tests) and the [linter](#linting) again.
+Now it's time to make your changes. We suggest taking a quick look at our [coding conventions](#coding-conventions) -
+it'll make the review process easier down the line. In addition to any code changes, make sure to update the
+documentation and the unit tests if necessary. Not sure if your changes require test or documentation updates? Just ask
+in Slack or through a comment on the relevant issue. When you're done, make sure to run the [unit tests](#unit-tests)
+and the [linter](#linting) again.
 
 Finally, you'll want to [submit a pull request](#submitting-a-pull-request). And that's it!
 
 #### Virtual Environments
 
-If required dependencies conflict with packages or modules you need for other projects, you can create and use a [virtual environment](https://docs.python.org/3/library/venv.html).
+If required dependencies conflict with packages or modules you need for other projects, you can create and use
+a [virtual environment](https://docs.python.org/3/library/venv.html).
 
 ```
-python3 -m venv .venv # Creates a virtual environment in the .venv folder
-source .venv/bin/activate # Activate in Unix or MacOS
-.venv/Scripts/activate.bat # Activate in Windows
+> python3 -m venv .venv # Creates a virtual environment in the .venv folder
+> source .venv/bin/activate # Activate in Unix or MacOS
+> .venv/Scripts/activate.bat # Activate in Windows
 ```
 
 #### Installing Dependencies
 
-Before running or testing your code changes, be sure to install all of the required Python libraries that Parsons depends on.
+Before running or testing your code changes, be sure to install all of the required Python libraries that Parsons
+depends on.
 
 From the root of the parsons repository, use the run the following command:
 
 ```bash
 > pip install -r requirements.txt
+> pip install -r requirements-dev.txt
 ```
 
 #### Unit Tests
 
-When contributing code, we ask you to add to tests that can be used to verify that the code is working as expected. All of our unit tests are located in the `test/` folder at the root of the repository.
+When contributing code, we ask you to add to tests that can be used to verify that the code is working as expected. All
+of our unit tests are located in the `test/` folder at the root of the repository.
 
-We use the pytest tool to run our suite of automated unit tests. The pytest command line tool is installed as part of the Parsons dependencies.
+We use the pytest tool to run our suite of automated unit tests. The pytest command line tool is installed as part of
+the Parsons dependencies.
 
 To run all the entire suite of unit tests, execute the following command:
 
 ```bash
-> pytest -rf test/
+> pytest
 ```
 
-Once the pytest tool has finished running all of the tests, it will output details around any errors or test failures it encountered. If no failures are identified, then you are good to go!
+Once the pytest tool has finished running all of the tests, it will output details around any errors or test failures it
+encountered. If no failures are identified, then you are good to go!
 
-**Note:*** Some tests are written to call out to external API’s, and will be skipped as part of standard unit testing. This is expected.
+**Note:*** Some tests are written to call out to external API’s, and will be skipped as part of standard unit testing.
+This is expected.
 
 See the [pytest documentation](https://docs.pytest.org/en/latest/contents.html) for more info and many more options.
 
 #### Linting
 
-We use the [black](https://github.com/psf/black) and [flake8](http://flake8.pycqa.org/en/latest/) tools to [lint](https://en.wikipedia.org/wiki/Lint_(software)) the code in the repository to make sure it matches our preferred style. Both tools are installed as part of the Parsons dependencies.
+We use the [black](https://github.com/psf/black) and [flake8](http://flake8.pycqa.org/en/latest/) tools
+to [lint](https://en.wikipedia.org/wiki/Lint_(software)) the code in the repository to make sure it matches our
+preferred style. Both tools are installed as part of the Parsons dependencies.
 
 Run the following commands from the root of the Parsons repository to lint your code changes:
 
 ```bash
-> flake8 --max-line-length=100 --extend-ignore=E203,W503 parsons
-> black parsons
+> flake8 parsons/ test/ useful_resources/
+> black parsons/ test/ useful_resources/
 ```
 
 Pre-commit hooks are available to enforce black and isort formatting on
@@ -107,13 +133,17 @@ pre-commit`, and then run `pre-commit install`.
 
 The following is a list of best practices to consider when writing code for the Parsons project:
 
-* Each tool connector should be its own unique class (e.g. ActionKit, VAN) in its own Python package. Use existing connectors as examples when deciding how to layout your code.
+* Each tool connector should be its own unique class (e.g. ActionKit, VAN) in its own Python package. Use existing
+  connectors as examples when deciding how to layout your code.
 
 * Methods should be named using a verb_noun structure, such as `get_activist()` or `update_event()`.
 
-* Methods should reflect the vocabulary utilized by the original tool where possible to mantain transparency. For example, Google Cloud Storage refers to file like objects as blobs. The methods are called `get_blob()` rather than `get_file()`.
+* Methods should reflect the vocabulary utilized by the original tool where possible to mantain transparency. For
+  example, Google Cloud Storage refers to file like objects as blobs. The methods are called `get_blob()` rather
+  than `get_file()`.
 
-* Methods that can work with arbitrarily large data (e.g. database or API queries) should use of Parson Tables to hold the data instead of standard Python collections (e.g. lists, dicts).
+* Methods that can work with arbitrarily large data (e.g. database or API queries) should use of Parson Tables to hold
+  the data instead of standard Python collections (e.g. lists, dicts).
 
 * You should avoid abbreviations for method names and variable names where possible.
 
@@ -121,34 +151,45 @@ The following is a list of best practices to consider when writing code for the
 * Capitalize the word Parsons for consistency where possible, especially in documentation.
 
-If you are building a new connector or extending an existing connector, there are more best practices in the [How to Build a Connector](https://move-coop.github.io/parsons/html/build_a_connector.html) documentation.
+If you are building a new connector or extending an existing connector, there are more best practices in
+the [How to Build a Connector](https://move-coop.github.io/parsons/html/build_a_connector.html) documentation.
 
 ## Documentation
 
-Parsons documentation is built using the Python Sphinx tool. Sphinx uses the `docs/*.rst` files in the repository to create the documentation.
+Parsons documentation is built using the Python Sphinx tool. Sphinx uses the `docs/*.rst` files in the repository to
+create the documentation.
 
-We have a [documentation label](https://github.com/move-coop/parsons/issues?q=is%3Aissue+is%3Aopen+label%3Adocumentation) that may help you find good docs issues to work on. If you are adding a new connector, you will need to add a reference to the connector to one of the .rst files. Please use the existing documentation as an example.
+We have
+a [documentation label](https://github.com/move-coop/parsons/issues?q=is%3Aissue+is%3Aopen+label%3Adocumentation) that
+may help you find good docs issues to work on. If you are adding a new connector, you will need to add a reference to
+the connector to one of the .rst files. Please use the existing documentation as an example.
 
-When editing documentation, make sure you are editing the source files (with .md or .rst extension) and not the build files (.html extension).
+When editing documentation, make sure you are editing the source files (with .md or .rst extension) and not the build
+files (.html extension).
 
 The workflow for documentation changes is a bit simpler than for code changes:
 
 * Fork the Parsons project using [the “Fork” button in GitHub](https://guides.github.com/activities/forking/)
 * Clone your fork to your local computer
-* Change into the `docs` folder and install the requirements with `pip install -r requirements.txt` (you may want to set up a [virtual environment](#virtual-environments) first)
-* Make your changes and re-build the docs by running `make html`. (Note: this builds only a single version of the docs, from the current files. To create docs with multiple versions like our publicly hosted docs, run `make deploy_docs`.)
+* Change into the `docs` folder and install the requirements with `pip install -r requirements.txt` (you may want to set
+  up a [virtual environment](#virtual-environments) first)
+* Make your changes and re-build the docs by running `make html`. (Note: this builds only a single version of the docs,
+  from the current files. To create docs with multiple versions like our publicly hosted docs, run `make deploy_docs`.)
 * Open these files in your web browser to check that they look as you expect.
 * [Submit a pull request](#submitting-a-pull-request)
 
-When you make documentation changes, you only need to track the source files with git. The docs built by the html folder should not be included.
+When you make documentation changes, you only need to track the source files with git. The docs built by the html folder
+should not be included.
 
 You should not need to worry about the unit tests or the linter if you are making documentation changes only.
 
 ## Contributing Sample Code
 
-One important way to contribute to the Parsons project is to submit sample code that provides recipes and patterns for how to use the Parsons library.
+One important way to contribute to the Parsons project is to submit sample code that provides recipes and patterns for
+how to use the Parsons library.
 
-We have a folder called `useful_resources/` in the root of the repository. If you have scripts that incorporate Parsons, we encourage you to add them there!
+We have a folder called `useful_resources/` in the root of the repository. If you have scripts that incorporate Parsons,
+we encourage you to add them there!
 
 The workflow for adding sample code is:
 
@@ -161,8 +202,12 @@ You should not need to worry about the unit tests or the linter if you are only
 
 ## Submitting a Pull Request
 
-To submit a pull request, follow [these instructions to create a Pull Request from your fork](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request-from-a-fork) back to the original Parsons repository.
+To submit a pull request,
+follow [these instructions to create a Pull Request from your fork](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request-from-a-fork)
+back to the original Parsons repository.
 
-The Parsons team will review your pull request and provide feedback. Please feel free to ping us if no one's responded to your Pull Request after a few days. We may not be able to review it right away, but we should be able to tell you when we'll get to it.
+The Parsons team will review your pull request and provide feedback. Please feel free to ping us if no one's responded
+to your Pull Request after a few days. We may not be able to review it right away, but we should be able to tell you
+when we'll get to it.
 
 Once your pull request has been approved, the Parsons team will merge your changes into the Parsons repository
diff --git a/README.md b/README.md
index c89506083c..3b31c4ea88 100644
--- a/README.md
+++ b/README.md
@@ -1,51 +1,66 @@
 # Parsons
+
 [![Downloads](https://pepy.tech/badge/parsons)](https://pepy.tech/project/parsons)
 [![PyPI - Python Version](https://img.shields.io/pypi/pyversions/parsons)](https://pypi.org/project/parsons/)
 [![PyPI](https://img.shields.io/pypi/v/parsons?color=blue)](https://pypi.org/project/parsons/)
 [![CircleCI](https://circleci.com/gh/move-coop/parsons/tree/main.svg?style=shield)](https://circleci.com/gh/move-coop/parsons/tree/main)
 
-A Python package that provides a simple interface to a variety of utilities and tools frequently used by progressive organizations, political and issue campaigns, activists, and other allied actors.
+A Python package that provides a simple interface to a variety of utilities and tools frequently used by progressive
+organizations, political and issue campaigns, activists, and other allied actors.
 
-Parsons offers simplified interactions with these services and tools, including a growing number of CRMs, organizing tools, cloud compute service providers, as well as tools to easily transform data in transit.
+Parsons offers simplified interactions with these services and tools, including a growing number of CRMs, organizing
+tools, cloud compute service providers, as well as tools to easily transform data in transit.
 
-This project is maintained by [The Movement Cooperative](https://movementcooperative.org/) and is named after [Lucy Parsons](https://en.wikipedia.org/wiki/Lucy_Parsons). The Movement Cooperative is a member-led organization focused on providing data, tools, and strategic support for the progressive community.
+This project is maintained by [The Movement Cooperative](https://movementcooperative.org/) and is named
+after [Lucy Parsons](https://en.wikipedia.org/wiki/Lucy_Parsons). The Movement Cooperative is a member-led organization
+focused on providing data, tools, and strategic support for the progressive community.
 
 Parsons is only supported for Python 3.8-10.
 
 ## Table of Contents
+
 - [License and Usage](#license-and-usage)
 - [Documentation](#documentation)
 - [Installation](#installation)
 - [Quickstart](#quickstart)
-- [Community](#community) 
+- [Community](#community)
 
 ## License and Usage
+
-Usage of Parsons is governed by a [modified Apache License with author attribution statement](https://github.com/move-coop/parsons/blob/main/LICENSE.md).
+Usage of Parsons is governed by
+a [modified Apache License with author attribution statement](https://github.com/move-coop/parsons/blob/main/LICENSE.md).
## Documentation -To gain a full understanding of all of the features of Parsons, please review the Parsons [documentation](https://move-coop.github.io/parsons/html/index.html). -## Installation +To gain a full understanding of all of the features of Parsons, please review the +Parsons [documentation](https://move-coop.github.io/parsons/html/index.html). +## Installation ### PYPI -You can install the most recent release by running: `pip install parsons[all]` +You can install the most recent release by running: `pip install parsons[all]` ### Install from Github -To access the most recent code base that may contain features not yet included in the latest release, download this repository and then run `python setup.py develop`. +To access the most recent code base that may contain features not yet included in the latest release, download this +repository and then run `python setup.py develop`. ### Docker Container -We have a Parsons Docker container hosted on [DockerHub](https://hub.docker.com/r/movementcooperative/parsons) for each release of Parsons, including the `latest`. + +We have a Parsons Docker container hosted on [DockerHub](https://hub.docker.com/r/movementcooperative/parsons) for each +release of Parsons, including the `latest`. ## Quickstart -For this Quickstart, we are looking to generate a list of voters with cell phones using a [dummy data file](docs/quickstart.csv). We use the `assert` statements to verify that the data has been loaded correctly. +For this Quickstart, we are looking to generate a list of voters with cell phones using +a [dummy data file](docs/quickstart.csv). We use the `assert` statements to verify that the data has been loaded +correctly. ```python # Download the Census data from the Parsons GitHub repository from parsons import GitHub + github = GitHub() dummy_data = github.download_table('move-coop/parsons', 'docs/quickstart.csv') assert dummy_data.num_rows == 1000 # Check that we got all 1,000 people @@ -56,7 +71,7 @@ assert people_with_cell_phones.num_rows == 498 # Check that we filtered down to # Extract only the columns we need (first name, last name, phone number) people_with_cell_phones = people_with_cell_phones.cut('first_name', 'last_name', 'phone_number') -assert people_with_cell_phones.columns == ['first_name', 'last_name', 'phone_number'] # Check columns +assert people_with_cell_phones.columns == ['first_name', 'last_name', 'phone_number'] # Check columns # Output the list to a local CSV file filename = people_with_cell_phones.to_csv() # filename will be the path to the local CSV file @@ -64,12 +79,18 @@ filename = people_with_cell_phones.to_csv() # filename will be the path to the # In order to upload data to a Google Sheet, you will need to set the GOOGLE_DRIVE_CREDENTIALS # environment variable from parsons import GoogleSheets + sheets = GoogleSheets() sheet_id = sheets.create_spreadsheet('Voter Cell Phones') sheets.append_to_sheet(sheet_id, people_with_cell_phones) ``` ## Community -We hope to foster a strong and robust community of individuals who use and contribute to further development. Individuals are encouraged to submit issues with bugs, suggestions and feature requests. [Here](https://github.com/move-coop/parsons/blob/main/CONTRIBUTING.md) are the guidelines and best practices for contributing to Parsons. -You can also stay up to date by joining the Parsons Slack group, an active community of Parsons contributors and progressive data engineers. For an invite, just reach out to engineering+parsons@movementcooperative.org! 
+We hope to foster a strong and robust community of individuals who use and contribute to further development. +Individuals are encouraged to submit issues with bugs, suggestions and feature +requests. [Here](https://github.com/move-coop/parsons/blob/main/CONTRIBUTING.md) are the guidelines and best practices +for contributing to Parsons. + +You can also stay up to date by joining the Parsons Slack group, an active community of Parsons contributors and +progressive data engineers. For an invite, just reach out to engineering+parsons@movementcooperative.org! diff --git a/parsons/__init__.py b/parsons/__init__.py index 66ebf85d47..dd52b4fbc7 100644 --- a/parsons/__init__.py +++ b/parsons/__init__.py @@ -93,9 +93,7 @@ ("parsons.zoom.zoom", "Zoom"), ): try: - globals()[connector_name] = getattr( - importlib.import_module(module_path), connector_name - ) + globals()[connector_name] = getattr(importlib.import_module(module_path), connector_name) __all__.append(connector_name) except ImportError: logger.debug(f"Could not import {module_path}.{connector_name}; skipping") diff --git a/parsons/actblue/actblue.py b/parsons/actblue/actblue.py index a6d9f57281..b5981319db 100644 --- a/parsons/actblue/actblue.py +++ b/parsons/actblue/actblue.py @@ -31,18 +31,11 @@ class ActBlue(object): visit https://secure.actblue.com/docs/csv_api#authentication. """ - def __init__( - self, actblue_client_uuid=None, actblue_client_secret=None, actblue_uri=None - ): - self.actblue_client_uuid = check_env.check( - "ACTBLUE_CLIENT_UUID", actblue_client_uuid - ) - self.actblue_client_secret = check_env.check( - "ACTBLUE_CLIENT_SECRET", actblue_client_secret - ) + def __init__(self, actblue_client_uuid=None, actblue_client_secret=None, actblue_uri=None): + self.actblue_client_uuid = check_env.check("ACTBLUE_CLIENT_UUID", actblue_client_uuid) + self.actblue_client_secret = check_env.check("ACTBLUE_CLIENT_SECRET", actblue_client_secret) self.uri = ( - check_env.check("ACTBLUE_URI", actblue_uri, optional=True) - or ACTBLUE_API_ENDPOINT + check_env.check("ACTBLUE_URI", actblue_uri, optional=True) or ACTBLUE_API_ENDPOINT ) self.headers = { "accept": "application/json", @@ -86,9 +79,7 @@ def post_request(self, csv_type=None, date_range_start=None, date_range_end=None "date_range_start": date_range_start, "date_range_end": date_range_end, } - logger.info( - f"Requesting {csv_type} from {date_range_start} up to {date_range_end}." - ) + logger.info(f"Requesting {csv_type} from {date_range_start} up to {date_range_end}.") response = self.client.post_request(url="csvs", json=body) return response @@ -160,9 +151,7 @@ def get_contributions(self, csv_type, date_range_start, date_range_end): Contents of the generated contribution CSV as a Parsons table. 
""" - post_request_response = self.post_request( - csv_type, date_range_start, date_range_end - ) + post_request_response = self.post_request(csv_type, date_range_start, date_range_end) csv_id = post_request_response["id"] download_url = self.poll_for_download_url(csv_id) table = Table.from_csv(download_url) diff --git a/parsons/action_builder/action_builder.py b/parsons/action_builder/action_builder.py index b2529e3dd9..5249213a7b 100644 --- a/parsons/action_builder/action_builder.py +++ b/parsons/action_builder/action_builder.py @@ -59,9 +59,7 @@ def _get_page(self, campaign, object_name, page, per_page=25, filter=None): return self.api.get_request(url=url, params=params) - def _get_all_records( - self, campaign, object_name, limit=None, per_page=25, filter=None - ): + def _get_all_records(self, campaign, object_name, limit=None, per_page=25, filter=None): # Returns a list of entries for a given object, such as people, tags, or connections. # See Action Builder API docs for more: https://www.actionbuilder.org/docs/v1/index.html @@ -72,9 +70,7 @@ def _get_all_records( # Keep getting the next page until record limit is exceeded or an empty result returns while True: # Get this page and increase page number to the next one - response = self._get_page( - campaign, object_name, page, per_page, filter=filter - ) + response = self._get_page(campaign, object_name, page, per_page, filter=filter) page = page + 1 # Check that there's actually data @@ -237,9 +233,7 @@ def update_entity_record(self, identifier, data, campaign=None): identifier = [identifier] # Default to assuming identifier comes from Action Builder and add prefix if missing - identifiers = [ - f"action_builder:{id}" if ":" not in id else id for id in identifier - ] + identifiers = [f"action_builder:{id}" if ":" not in id else id for id in identifier] if not isinstance(data, dict): data = {} @@ -252,9 +246,7 @@ def update_entity_record(self, identifier, data, campaign=None): return self._upsert_entity(data=data, campaign=campaign) - def add_section_field_values_to_record( - self, identifier, section, field_values, campaign=None - ): + def add_section_field_values_to_record(self, identifier, section, field_values, campaign=None): """ Add one or more tags (i.e. custom field value) to an existing entity record in Action Builder. The tags, along with their field and section, must already exist (except for @@ -285,9 +277,7 @@ def add_section_field_values_to_record( data = {"add_tags": tag_data} - return self.update_entity_record( - identifier=identifier, data=data, campaign=campaign - ) + return self.update_entity_record(identifier=identifier, data=data, campaign=campaign) def remove_tagging( self, @@ -329,9 +319,7 @@ def remove_tagging( raise ValueError("Please supply a tag_name or tag_id!") if {identifier, tagging_id} == {None}: - raise ValueError( - "Please supply an entity or connection identifier, or a tagging id!" 
- ) + raise ValueError("Please supply an entity or connection identifier, or a tagging id!") campaign = self._campaign_check(campaign) endpoint = "tags/{}/taggings" @@ -354,10 +342,11 @@ def remove_tagging( if tag_id and not tagging_id: taggings = self._get_all_records(self.campaign, endpoint.format(tag_id)) taggings_filtered = taggings.select_rows( - lambda row: identifier - in row["_links"]["action_builder:connection"]["href"] - if row["item_type"] == "connection" - else identifier in row["osdi:person"]["href"] + lambda row: ( + identifier in row["_links"]["action_builder:connection"]["href"] + if row["item_type"] == "connection" + else identifier in row["osdi:person"]["href"] + ) ) tagging_id = [ x.split(":")[1] @@ -370,9 +359,7 @@ def remove_tagging( f"campaigns/{campaign}/{endpoint.format(tag_id)}/{tagging_id}" ) - def upsert_connection( - self, identifiers, tag_data=None, campaign=None, reactivate=True - ): + def upsert_connection(self, identifiers, tag_data=None, campaign=None, reactivate=True): """ Load or update a connection record in Action Builder between two existing entity records. Only one connection record is allowed per pair of entities, so if the connection already @@ -457,9 +444,7 @@ def deactivate_connection( # Check that either connection or second entity identifier are provided if {connection_identifier, to_identifier} == {None}: - raise ValueError( - "Must provide a connection ID or an ID for the second entity" - ) + raise ValueError("Must provide a connection ID or an ID for the second entity") campaign = self._campaign_check(campaign) diff --git a/parsons/action_kit/action_kit.py b/parsons/action_kit/action_kit.py index 0e32acf88d..17bdc4f1db 100644 --- a/parsons/action_kit/action_kit.py +++ b/parsons/action_kit/action_kit.py @@ -172,9 +172,7 @@ def update_user(self, user_id, **kwargs): ``None`` """ - resp = self.conn.patch( - self._base_endpoint("user", user_id), data=json.dumps(kwargs) - ) + resp = self.conn.patch(self._base_endpoint("user", user_id), data=json.dumps(kwargs)) logger.info(f"{resp.status_code}: {user_id}") def get_event(self, event_id): @@ -229,9 +227,7 @@ def update_event(self, event_id, **kwargs): ``None`` """ - resp = self.conn.patch( - self._base_endpoint("event", event_id), data=json.dumps(kwargs) - ) + resp = self.conn.patch(self._base_endpoint("event", event_id), data=json.dumps(kwargs)) logger.info(f"{resp.status_code}: {event_id}") def get_blackholed_email(self, email): @@ -635,9 +631,7 @@ def copy_mailer(self, mailer_id): copy a mailer returns new copy of mailer which should be updatable. 
""" - resp = self.conn.post( - self._base_endpoint("mailer", entity_id=mailer_id) + "/copy" - ) + resp = self.conn.post(self._base_endpoint("mailer", entity_id=mailer_id) + "/copy") return resp def update_mailing(self, mailer_id, **kwargs): @@ -655,9 +649,7 @@ def update_mailing(self, mailer_id, **kwargs): ``HTTP response from the patch request`` """ - resp = self.conn.patch( - self._base_endpoint("mailer", mailer_id), data=json.dumps(kwargs) - ) + resp = self.conn.patch(self._base_endpoint("mailer", mailer_id), data=json.dumps(kwargs)) logger.info(f"{resp.status_code}: {mailer_id}") return resp @@ -836,9 +828,7 @@ def update_order(self, order_id, **kwargs): ``None`` """ - resp = self.conn.patch( - self._base_endpoint("order", order_id), data=json.dumps(kwargs) - ) + resp = self.conn.patch(self._base_endpoint("order", order_id), data=json.dumps(kwargs)) logger.info(f"{resp.status_code}: {order_id}") def get_orderrecurring(self, orderrecurring_id): @@ -869,9 +859,7 @@ def cancel_orderrecurring(self, recurring_id): ``None`` """ - resp = self.conn.post( - self._base_endpoint("orderrecurring", str(recurring_id) + "/cancel") - ) + resp = self.conn.post(self._base_endpoint("orderrecurring", str(recurring_id) + "/cancel")) logger.info(f"{resp.status_code}: {recurring_id}") return resp @@ -1243,13 +1231,7 @@ def bulk_upload_table( results = [] for tbl in upload_tables: user_fields_only = int( - not any( - [ - h - for h in tbl.columns - if h != "email" and not h.startswith("user_") - ] - ) + not any([h for h in tbl.columns if h != "email" and not h.startswith("user_")]) ) results.append( self.bulk_upload_csv( @@ -1266,9 +1248,9 @@ def _split_tables_no_empties(self, table, no_overwrite_on_empty, set_only_column # uploading combo of user_id and email column should be mutually exclusive blank_columns_test = table.columns if not no_overwrite_on_empty: - blank_columns_test = set( - ["user_id", "email"] + (set_only_columns or []) - ).intersection(table.columns) + blank_columns_test = set(["user_id", "email"] + (set_only_columns or [])).intersection( + table.columns + ) for row in table: blanks = tuple(k for k in blank_columns_test if row.get(k) in (None, "")) grp = table_groups.setdefault(blanks, []) diff --git a/parsons/action_network/action_network.py b/parsons/action_network/action_network.py index ebcff10c98..eb32596fcf 100644 --- a/parsons/action_network/action_network.py +++ b/parsons/action_network/action_network.py @@ -97,9 +97,7 @@ def get_advocacy_campaign(self, advocacy_campaign_id): return self.api.get_request(f"advocacy_campaigns/{advocacy_campaign_id}") # Attendances - def get_person_attendances( - self, person_id, limit=None, per_page=25, page=None, filter=None - ): + def get_person_attendances(self, person_id, limit=None, per_page=25, page=None, filter=None): """ `Args:` person_id: @@ -119,16 +117,10 @@ def get_person_attendances( https://actionnetwork.org/docs/v2/attendances """ if page: - return self._get_page( - f"people/{person_id}/attendances", page, per_page, filter - ) - return self._get_entry_list( - f"people/{person_id}/attendances", limit, per_page, filter - ) + return self._get_page(f"people/{person_id}/attendances", page, per_page, filter) + return self._get_entry_list(f"people/{person_id}/attendances", limit, per_page, filter) - def get_event_attendances( - self, event_id, limit=None, per_page=25, page=None, filter=None - ): + def get_event_attendances(self, event_id, limit=None, per_page=25, page=None, filter=None): """ `Args:` event_id: the unique id of the event @@ -147,12 
+139,8 @@ def get_event_attendances( https://actionnetwork.org/docs/v2/attendances """ if page: - return self._get_page( - f"events/{event_id}/attendances", page, per_page, filter - ) - return self._get_entry_list( - f"events/{event_id}/attendances", limit, per_page, filter - ) + return self._get_page(f"events/{event_id}/attendances", page, per_page, filter) + return self._get_entry_list(f"events/{event_id}/attendances", limit, per_page, filter) def get_event_attendance(self, event_id, attendance_id): """ @@ -218,9 +206,7 @@ def update_attendance(self, event_id, attendance_id, payload): `Documentation Reference`: https://actionnetwork.org/docs/v2/attendances """ - return self.api.put_request( - f"events/{event_id}/attendances/{attendance_id}", payload - ) + return self.api.put_request(f"events/{event_id}/attendances/{attendance_id}", payload) # Campaigns def get_campaigns(self, limit=None, per_page=25, page=None, filter=None): @@ -336,9 +322,7 @@ def get_fundraising_page_donations( filter, ) - def get_person_donations( - self, person_id, limit=None, per_page=25, page=None, filter=None - ): + def get_person_donations(self, person_id, limit=None, per_page=25, page=None, filter=None): """ `Args:` person_id: The id of the person @@ -477,9 +461,7 @@ def create_event_in_event_campaign(self, event_campaign_id, payload): `Documentation Reference`: https://actionnetwork.org/docs/v2/event_campaigns """ - return self.api.post_request( - f"event_campaigns/{event_campaign_id}/events", payload - ) + return self.api.post_request(f"event_campaigns/{event_campaign_id}/events", payload) def update_event_campaign(self, event_campaign_id, payload): """ @@ -601,9 +583,7 @@ def create_event(self, title, start_date=None, location=None): if isinstance(location, dict): data["location"] = location - event_dict = self.api.post_request( - url=f"{self.api_url}/events", data=json.dumps(data) - ) + event_dict = self.api.post_request(url=f"{self.api_url}/events", data=json.dumps(data)) an_event_id = event_dict["_links"]["self"]["href"].split("/")[-1] event_dict["event_id"] = an_event_id @@ -997,9 +977,7 @@ def get_advocacy_campaign_outreaches( filter, ) - def get_person_outreaches( - self, person_id, limit=None, per_page=25, page=None, filter=None - ): + def get_person_outreaches(self, person_id, limit=None, per_page=25, page=None, filter=None): """ `Args:` person_id: @@ -1020,12 +998,8 @@ def get_person_outreaches( https://actionnetwork.org/docs/v2/outreaches """ if page: - return self._get_page( - f"people/{person_id}/outreaches", page, per_page, filter - ) - return self._get_entry_list( - f"people/{person_id}/outreaches", limit, per_page, filter - ) + return self._get_page(f"people/{person_id}/outreaches", page, per_page, filter) + return self._get_entry_list(f"people/{person_id}/outreaches", limit, per_page, filter) def get_advocacy_campaign_outreach(self, advocacy_campaign_id, outreach_id): """ @@ -1242,9 +1216,7 @@ def upsert_person( {"number": re.sub("[^0-9]", "", mobile_number), "status": mobile_status} ] elif isinstance(mobile_number, int): - mobile_numbers_field = [ - {"number": str(mobile_number), "status": mobile_status} - ] + mobile_numbers_field = [{"number": str(mobile_number), "status": mobile_status}] elif isinstance(mobile_number, list): if len(mobile_number) > 1: raise ("Action Network allows only 1 phone number per activist") @@ -1294,9 +1266,7 @@ def upsert_person( identifiers = response["identifiers"] person_id = [ - entry_id.split(":")[1] - for entry_id in identifiers - if "action_network:" in 
entry_id + entry_id.split(":")[1] for entry_id in identifiers if "action_network:" in entry_id ] if not person_id: logger.error(f"Response gave no valid person_id: {identifiers}") @@ -1324,9 +1294,7 @@ def add_person( Creates a person in the database. WARNING: this endpoint has been deprecated in favor of upsert_person. """ - logger.warning( - "Method 'add_person' has been deprecated. Please use 'upsert_person'." - ) + logger.warning("Method 'add_person' has been deprecated. Please use 'upsert_person'.") # Pass inputs to preferred method: self.upsert_person( email_address=email_address, @@ -1546,9 +1514,7 @@ def get_query(self, query_id): return self.api.get_request(f"queries/{query_id}") # Signatures - def get_petition_signatures( - self, petition_id, limit=None, per_page=25, page=None, filter=None - ): + def get_petition_signatures(self, petition_id, limit=None, per_page=25, page=None, filter=None): """ `Args:` petition_id: @@ -1568,16 +1534,10 @@ def get_petition_signatures( https://actionnetwork.org/docs/v2/signatures """ if page: - return self._get_page( - f"petitions/{petition_id}/signatures", page, per_page, filter - ) - return self._get_entry_list( - f"petitions/{petition_id}/signatures", limit, per_page, filter - ) + return self._get_page(f"petitions/{petition_id}/signatures", page, per_page, filter) + return self._get_entry_list(f"petitions/{petition_id}/signatures", limit, per_page, filter) - def get_person_signatures( - self, person_id, limit=None, per_page=25, page=None, filter=None - ): + def get_person_signatures(self, person_id, limit=None, per_page=25, page=None, filter=None): """ `Args:` person_id: @@ -1599,12 +1559,8 @@ def get_person_signatures( https://actionnetwork.org/docs/v2/signatures """ if page: - return self._get_page( - f"people/{person_id}/signatures", page, per_page, filter - ) - return self._get_entry_list( - f"people/{person_id}/signatures", limit, per_page, filter - ) + return self._get_page(f"people/{person_id}/signatures", page, per_page, filter) + return self._get_entry_list(f"people/{person_id}/signatures", limit, per_page, filter) def get_petition_signature(self, petition_id, signature_id): """ @@ -1618,9 +1574,7 @@ def get_petition_signature(self, petition_id, signature_id): `Documentation Reference`: https://actionnetwork.org/docs/v2/signatures """ - return self.api.get_request( - f"petitions/{petition_id}/signatures/{signature_id}" - ) + return self.api.get_request(f"petitions/{petition_id}/signatures/{signature_id}") def get_person_signature(self, person_id, signature_id): """ @@ -1673,14 +1627,10 @@ def update_signature(self, petition_id, signature_id, data): `Documentation Reference`: https://actionnetwork.org/docs/v2/signatures """ - return self.api.put_request( - f"petitions/{petition_id}/signatures/{signature_id}", data - ) + return self.api.put_request(f"petitions/{petition_id}/signatures/{signature_id}", data) # Submissions - def get_form_submissions( - self, form_id, limit=None, per_page=25, page=None, filter=None - ): + def get_form_submissions(self, form_id, limit=None, per_page=25, page=None, filter=None): """ `Args:` form_id: @@ -1701,16 +1651,10 @@ def get_form_submissions( https://actionnetwork.org/docs/v2/submissions """ if page: - return self._get_page( - f"forms/{form_id}/submissions", page, per_page, filter - ) - return self._get_entry_list( - f"forms/{form_id}/submissions", limit, per_page, filter - ) + return self._get_page(f"forms/{form_id}/submissions", page, per_page, filter) + return 
self._get_entry_list(f"forms/{form_id}/submissions", limit, per_page, filter) - def get_person_submissions( - self, person_id, limit=None, per_page=25, page=None, filter=None - ): + def get_person_submissions(self, person_id, limit=None, per_page=25, page=None, filter=None): """ `Args:` person_id: @@ -1730,12 +1674,8 @@ def get_person_submissions( https://actionnetwork.org/docs/v2/submissions """ if page: - return self._get_page( - f"people/{person_id}/submissions", page, per_page, filter - ) - return self._get_entry_list( - f"people/{person_id}/submissions", limit, per_page, filter - ) + return self._get_page(f"people/{person_id}/submissions", page, per_page, filter) + return self._get_entry_list(f"people/{person_id}/submissions", limit, per_page, filter) def get_form_submission(self, form_id, submission_id): """ @@ -1779,14 +1719,10 @@ def create_submission(self, form_id, person_id): """ payload = { "_links": { - "osdi:person": { - "href": f"https://actionnetwork.org/api/v2/people/{person_id}" - } + "osdi:person": {"href": f"https://actionnetwork.org/api/v2/people/{person_id}"} } } - return self.api.post_request( - f"forms/{form_id}/submissions", data=json.dumps(payload) - ) + return self.api.post_request(f"forms/{form_id}/submissions", data=json.dumps(payload)) def update_submission(self, form_id, submission_id, data): """ @@ -1855,14 +1791,10 @@ def add_tag(self, name): https://actionnetwork.org/docs/v2/tags """ data = {"name": name} - response = self.api.post_request( - url=f"{self.api_url}/tags", data=json.dumps(data) - ) + response = self.api.post_request(url=f"{self.api_url}/tags", data=json.dumps(data)) identifiers = response["identifiers"] person_id = [ - entry_id.split(":")[1] - for entry_id in identifiers - if "action_network:" in entry_id + entry_id.split(":")[1] for entry_id in identifiers if "action_network:" in entry_id ][0] logger.info(f"Tag {person_id} successfully added to tags.") return response @@ -1924,9 +1856,7 @@ def create_tagging(self, tag_id, payload): `Documentation Reference`: https://actionnetwork.org/docs/v2/taggings """ - return self.api.post_request( - f"tags/{tag_id}/taggings", data=json.dumps(payload) - ) + return self.api.post_request(f"tags/{tag_id}/taggings", data=json.dumps(payload)) def delete_tagging(self, tag_id, tagging_id): """ diff --git a/parsons/alchemer/alchemer.py b/parsons/alchemer/alchemer.py index cf81c1fb4b..56a76f42db 100644 --- a/parsons/alchemer/alchemer.py +++ b/parsons/alchemer/alchemer.py @@ -15,9 +15,7 @@ def sg_compatibility(): os.environ["ALCHEMER_API_TOKEN"] = os.getenv("SURVEYGIZMO_API_TOKEN") if os.getenv("SURVEYGIZMO_API_TOKEN_SECRET"): - os.environ["ALCHEMER_API_TOKEN_SECRET"] = os.getenv( - "SURVEYGIZMO_API_TOKEN_SECRET" - ) + os.environ["ALCHEMER_API_TOKEN_SECRET"] = os.getenv("SURVEYGIZMO_API_TOKEN_SECRET") if os.getenv("SURVEYGIZMO_API_VERSION"): os.environ["ALCHEMER_API_VERSION"] = os.getenv("SURVEYGIZMO_API_VERSION") @@ -50,9 +48,7 @@ def __init__(self, api_token=None, api_token_secret=None, api_version="v5"): sg_compatibility() self.api_token = check_env.check("ALCHEMER_API_TOKEN", api_token) - self.api_token_secret = check_env.check( - "ALCHEMER_API_TOKEN_SECRET", api_token_secret - ) + self.api_token_secret = check_env.check("ALCHEMER_API_TOKEN_SECRET", api_token_secret) self.api_version = check_env.check("ALCHEMER_API_VERSION", api_version) self._client = surveygizmo.SurveyGizmo( @@ -111,9 +107,7 @@ def get_survey_responses(self, survey_id, page=None): if not page: while r["page"] < r["total_pages"]: - r = 
self._client.api.surveyresponse.list( - survey_id, page=(r["page"] + 1) - ) + r = self._client.api.surveyresponse.list(survey_id, page=(r["page"] + 1)) data.extend(r["data"]) tbl = Table(data).add_column("survey_id", survey_id, index=1) diff --git a/parsons/auth0/auth0.py b/parsons/auth0/auth0.py index 45d4e6d2c2..adb07f368c 100644 --- a/parsons/auth0/auth0.py +++ b/parsons/auth0/auth0.py @@ -33,9 +33,7 @@ def __init__(self, client_id=None, client_secret=None, domain=None): data={ "grant_type": "client_credentials", # OAuth 2.0 flow to use "client_id": check_env.check("AUTH0_CLIENT_ID", client_id), - "client_secret": check_env.check( - "AUTH0_CLIENT_SECRET", client_secret - ), + "client_secret": check_env.check("AUTH0_CLIENT_SECRET", client_secret), "audience": f"{self.base_url}/api/v2/", }, ) @@ -130,9 +128,7 @@ def upsert_user( data=payload, ) else: - ret = requests.post( - f"{self.base_url}/api/v2/users", headers=self.headers, data=payload - ) + ret = requests.post(f"{self.base_url}/api/v2/users", headers=self.headers, data=payload) if ret.status_code != 200: raise ValueError(f"Invalid response {ret.json()}") return ret @@ -175,8 +171,7 @@ def retrieve_all_users(self, connection="Username-Password-Authentication"): headers = self.headers fields = [ - {"name": n} - for n in ["user_id", "username", "email", "user_metadata", "app_metadata"] + {"name": n} for n in ["user_id", "username", "email", "user_metadata", "app_metadata"] ] # Start the users-export job response = requests.post( diff --git a/parsons/aws/lambda_distribute.py b/parsons/aws/lambda_distribute.py index 4ed319cada..850749e5b6 100644 --- a/parsons/aws/lambda_distribute.py +++ b/parsons/aws/lambda_distribute.py @@ -42,9 +42,7 @@ def __init__(self, use_env_token=True): self.s3 = S3(use_env_token=use_env_token) def put_object(self, bucket, key, object_bytes, **kwargs): - return self.s3.client.put_object( - Bucket=bucket, Key=key, Body=object_bytes, **kwargs - ) + return self.s3.client.put_object(Bucket=bucket, Key=key, Body=object_bytes, **kwargs) def get_range(self, bucket, key, rangestart, rangeend): """ @@ -133,9 +131,7 @@ def distribute_task_csv( ], # if we are using local storage, then it must be run locally, as well # (good for testing/debugging) - remote_aws_lambda_function_name="FORCE_LOCAL" - if storage == "local" - else None, + remote_aws_lambda_function_name="FORCE_LOCAL" if storage == "local" else None, ) for grp in group_ranges ] @@ -259,9 +255,7 @@ def process_task_portion( except Exception: # In Lambda you can search for '"Distribute Error"' in the logs type_, value_, traceback_ = sys.exc_info() - err_traceback_str = "\n".join( - traceback.format_exception(type_, value_, traceback_) - ) + err_traceback_str = "\n".join(traceback.format_exception(type_, value_, traceback_)) return { "Exception": "Distribute Error", "error": err_traceback_str, diff --git a/parsons/aws/s3.py b/parsons/aws/s3.py index 66d7254556..a1fc472fd1 100644 --- a/parsons/aws/s3.py +++ b/parsons/aws/s3.py @@ -195,16 +195,10 @@ def list_keys( continue # Match timestamp parsing - if ( - date_modified_before - and not key["LastModified"] < date_modified_before - ): + if date_modified_before and not key["LastModified"] < date_modified_before: continue - if ( - date_modified_after - and not key["LastModified"] > date_modified_after - ): + if date_modified_after and not key["LastModified"] > date_modified_after: continue # Convert date to iso string @@ -275,9 +269,7 @@ def create_bucket(self, bucket): self.client.create_bucket(Bucket=bucket) - def 
put_file( - self, bucket, key, local_path, acl="bucket-owner-full-control", **kwargs - ): + def put_file(self, bucket, key, local_path, acl="bucket-owner-full-control", **kwargs): """ Uploads an object to an S3 bucket @@ -296,9 +288,7 @@ def put_file( info. """ - self.client.upload_file( - local_path, bucket, key, ExtraArgs={"ACL": acl, **kwargs} - ) + self.client.upload_file(local_path, bucket, key, ExtraArgs={"ACL": acl, **kwargs}) def remove_file(self, bucket, key): """ @@ -442,9 +432,7 @@ def transfer_bucket( dest_key = key copy_source = {"Bucket": origin_bucket, "Key": key} - self.client.copy( - copy_source, destination_bucket, dest_key, ExtraArgs=kwargs - ) + self.client.copy(copy_source, destination_bucket, dest_key, ExtraArgs=kwargs) if remove_original: try: self.remove_file(origin_bucket, origin_key) diff --git a/parsons/azure/azure_blob_storage.py b/parsons/azure/azure_blob_storage.py index 9d824ce021..1f28572339 100644 --- a/parsons/azure/azure_blob_storage.py +++ b/parsons/azure/azure_blob_storage.py @@ -44,9 +44,7 @@ def __init__( self.credential = check_env.check("AZURE_CREDENTIAL", credential) if not self.account_url: self.account_name = check_env.check("AZURE_ACCOUNT_NAME", account_name) - self.account_domain = check_env.check( - "AZURE_ACCOUNT_DOMAIN", account_domain - ) + self.account_domain = check_env.check("AZURE_ACCOUNT_DOMAIN", account_domain) self.account_url = f"https://{self.account_name}.{self.account_domain}/" else: if not self.account_url.startswith("http"): @@ -55,9 +53,7 @@ def __init__( parsed_url = urlparse(self.account_url) self.account_name = parsed_url.netloc.split(".")[0] self.account_domain = ".".join(parsed_url.netloc.split(".")[1:]) - self.client = BlobServiceClient( - account_url=self.account_url, credential=self.credential - ) + self.client = BlobServiceClient(account_url=self.account_url, credential=self.credential) def list_containers(self): """ @@ -68,9 +64,7 @@ def list_containers(self): List of container names """ - container_names = [ - container.name for container in self.client.list_containers() - ] + container_names = [container.name for container in self.client.list_containers()] logger.info(f"Found {len(container_names)} containers.") return container_names @@ -108,9 +102,7 @@ def get_container(self, container_name): logger.info(f"Returning {container_name} container client") return self.client.get_container_client(container_name) - def create_container( - self, container_name, metadata=None, public_access=None, **kwargs - ): + def create_container(self, container_name, metadata=None, public_access=None, **kwargs): """ Create a container @@ -165,10 +157,7 @@ def list_blobs(self, container_name, name_starts_with=None): """ container_client = self.get_container(container_name) - blobs = [ - blob - for blob in container_client.list_blobs(name_starts_with=name_starts_with) - ] + blobs = [blob for blob in container_client.list_blobs(name_starts_with=name_starts_with)] logger.info(f"Found {len(blobs)} blobs in {container_name} container.") return blobs @@ -408,9 +397,7 @@ def upload_table(self, table, container_name, blob_name, data_type="csv", **kwar local_path = table.to_json() content_type = "application/json" else: - raise ValueError( - f"Unknown data_type value ({data_type}): must be one of: csv or json" - ) + raise ValueError(f"Unknown data_type value ({data_type}): must be one of: csv or json") return self.put_blob( container_name, blob_name, local_path, content_type=content_type, **kwargs diff --git a/parsons/bill_com/bill_com.py 
b/parsons/bill_com/bill_com.py index 83471ec2b7..1b0a411bfd 100644 --- a/parsons/bill_com/bill_com.py +++ b/parsons/bill_com/bill_com.py @@ -26,9 +26,7 @@ def __init__(self, user_name, password, org_id, dev_key, api_url): "orgId": org_id, "devKey": dev_key, } - response = requests.post( - url="%sLogin.json" % api_url, data=params, headers=self.headers - ) + response = requests.post(url="%sLogin.json" % api_url, data=params, headers=self.headers) self.dev_key = dev_key self.api_url = api_url self.session_id = response.json()["response_data"]["sessionId"] @@ -257,13 +255,7 @@ def get_or_create_customer(self, customer_name, customer_email, **kwargs): return self._get_request_response(data, "Create", "Customer") def create_invoice( - self, - customer_id, - invoice_number, - invoice_date, - due_date, - invoice_line_items, - **kwargs + self, customer_id, invoice_number, invoice_date, due_date, invoice_line_items, **kwargs ): """ `Args:` @@ -302,13 +294,7 @@ def create_invoice( return self._get_request_response(data, "Create", "Invoice") def send_invoice( - self, - invoice_id, - from_user_id, - to_email_addresses, - message_subject, - message_body, - **kwargs + self, invoice_id, from_user_id, to_email_addresses, message_subject, message_body, **kwargs ): """ `Args:` diff --git a/parsons/bloomerang/bloomerang.py b/parsons/bloomerang/bloomerang.py index eb6cd858e5..e90c64b811 100644 --- a/parsons/bloomerang/bloomerang.py +++ b/parsons/bloomerang/bloomerang.py @@ -35,9 +35,7 @@ class Bloomerang(object): def __init__(self, api_key=None, client_id=None, client_secret=None): self.api_key = check_env.check("BLOOMERANG_API_KEY", api_key, optional=True) - self.client_id = check_env.check( - "BLOOMERANG_CLIENT_ID", client_id, optional=True - ) + self.client_id = check_env.check("BLOOMERANG_CLIENT_ID", client_id, optional=True) self.client_secret = check_env.check( "BLOOMERANG_CLIENT_SECRET", client_secret, optional=True ) @@ -108,9 +106,7 @@ def _base_update(self, endpoint, entity_id=None, **kwargs): ) def _base_get(self, endpoint, entity_id=None, params=None): - return self.conn.get_request( - url=self._base_endpoint(endpoint, entity_id), params=params - ) + return self.conn.get_request(url=self._base_endpoint(endpoint, entity_id), params=params) def _base_delete(self, endpoint, entity_id=None): return self.conn.delete_request(url=self._base_endpoint(endpoint, entity_id)) @@ -227,9 +223,7 @@ def delete_transaction(self, transaction_id): """ return self._base_delete("transaction", entity_id=transaction_id) - def get_transactions( - self, page_number=1, page_size=50, order_by=None, order_direction=None - ): + def get_transactions(self, page_number=1, page_size=50, order_by=None, order_direction=None): """ `Args:` page_number: int diff --git a/parsons/bluelink/bluelink.py b/parsons/bluelink/bluelink.py index d132d00c9c..93e2f933d0 100644 --- a/parsons/bluelink/bluelink.py +++ b/parsons/bluelink/bluelink.py @@ -29,9 +29,7 @@ def __init__(self, user=None, password=None): "Content-Type": "application/json", } self.api_url = API_URL - self.api = APIConnector( - self.api_url, auth=(self.user, self.password), headers=self.headers - ) + self.api = APIConnector(self.api_url, auth=(self.user, self.password), headers=self.headers) def upsert_person(self, source, person=None): """ diff --git a/parsons/box/box.py b/parsons/box/box.py index 56b1aab0f6..9422fbba08 100644 --- a/parsons/box/box.py +++ b/parsons/box/box.py @@ -89,9 +89,7 @@ def create_folder(self, path) -> str: parent_folder_id = DEFAULT_FOLDER_ID return 
self.create_folder_by_id(folder_name, parent_folder_id=parent_folder_id) - def create_folder_by_id( - self, folder_name, parent_folder_id=DEFAULT_FOLDER_ID - ) -> str: + def create_folder_by_id(self, folder_name, parent_folder_id=DEFAULT_FOLDER_ID) -> str: """Create a Box folder. `Args`: diff --git a/parsons/braintree/braintree.py b/parsons/braintree/braintree.py index adf07d1dbf..c9d2b08df1 100644 --- a/parsons/braintree/braintree.py +++ b/parsons/braintree/braintree.py @@ -199,9 +199,7 @@ def __init__( ) ) - def get_disputes( - self, start_date=None, end_date=None, query_list=None, query_dict=None - ): + def get_disputes(self, start_date=None, end_date=None, query_list=None, query_dict=None): """ Get a table of disputes based on query parameters. There are three ways to pass query arguments: Pass a start_date and end_date @@ -246,12 +244,9 @@ def get_disputes( # Iterating on collection.items triggers web requests in batches of 50 records # Disputes query api doesn't return the ids -- we can't do anything but iterate if not collection.is_success: - raise ParsonsBraintreeError( - f"Braintree dispute query failed: {collection.message}" - ) + raise ParsonsBraintreeError(f"Braintree dispute query failed: {collection.message}") return Table( - [self._dispute_header()] - + [self._dispute_to_row(r) for r in collection.disputes.items] + [self._dispute_header()] + [self._dispute_to_row(r) for r in collection.disputes.items] ) def get_subscriptions( @@ -334,10 +329,7 @@ def get_subscriptions( logger.debug("Braintree subscriptions iterating to build subscriptions table") return Table( [self._subscription_header(include_transactions)] - + [ - self._subscription_to_row(include_transactions, r) - for r in collection.items - ] + + [self._subscription_to_row(include_transactions, r) for r in collection.items] ) def get_transactions( @@ -407,9 +399,7 @@ def get_transactions( ), ) query_count = len(collection.ids) - logger.info( - f"Braintree transactions resulted in transaction count of {query_count}" - ) + logger.info(f"Braintree transactions resulted in transaction count of {query_count}") if just_ids: return Table([("id",)] + [[item_id] for item_id in collection.ids]) @@ -419,8 +409,7 @@ def get_transactions( # but it, too, paginates with a max of 50 records logger.debug("Braintree transactions iterating to build transaction table") return Table( - [self._transaction_header()] - + [self._transaction_to_row(r) for r in collection.items] + [self._transaction_header()] + [self._transaction_to_row(r) for r in collection.items] ) def _dispute_header(self): @@ -454,14 +443,8 @@ def _transaction_to_row(self, collection_item): ) for k in self.credit_card_fields ] - + [ - getattr(collection_item.disbursement_details, k) - for k in self.disbursement_fields - ] - + [ - getattr(collection_item.customer_details, k) - for k in self.customer_fields - ] + + [getattr(collection_item.disbursement_details, k) for k in self.disbursement_fields] + + [getattr(collection_item.customer_details, k) for k in self.customer_fields] + [getattr(collection_item, k) for k in self.transaction_fields] ) @@ -541,9 +524,7 @@ def _get_query_objects(self, query_type, **queryparams): for qual, vals in filters.items(): # likely only one, but fine queryobj_qualfunc = getattr(queryobj, qual, None) if not queryobj_qualfunc: - raise ParsonsBraintreeError( - "oh no, that's not a braintree parameter" - ) + raise ParsonsBraintreeError("oh no, that's not a braintree parameter") if not isinstance(vals, list): vals = [vals] 
queries.append(queryobj_qualfunc(*vals)) diff --git a/parsons/capitol_canary/capitol_canary.py b/parsons/capitol_canary/capitol_canary.py index cf08fb94ff..4dc47d2d1a 100644 --- a/parsons/capitol_canary/capitol_canary.py +++ b/parsons/capitol_canary/capitol_canary.py @@ -56,9 +56,7 @@ def _paginate_request(self, url, args=None, page=None): return json - def get_advocates( - self, state=None, campaign_id=None, updated_since=None, page=None - ): + def get_advocates(self, state=None, campaign_id=None, updated_since=None, page=None): """ Return advocates (person records). @@ -251,9 +249,7 @@ def create_advocate( # Validate the passed in arguments if not campaigns: - raise ValueError( - "When creating an advocate, you must specify one or more campaigns." - ) + raise ValueError("When creating an advocate, you must specify one or more campaigns.") if not email and not phone: raise ValueError( diff --git a/parsons/catalist/catalist.py b/parsons/catalist/catalist.py index 6015ff1d83..a67f017cdb 100644 --- a/parsons/catalist/catalist.py +++ b/parsons/catalist/catalist.py @@ -75,9 +75,7 @@ def __init__( ) self.sftp = SFTP("t.catalist.us", sftp_username, sftp_password) - def load_table_to_sftp( - self, table: Table, input_subfolder: Optional[str] = None - ) -> str: + def load_table_to_sftp(self, table: Table, input_subfolder: Optional[str] = None) -> str: """Load table to Catalist sftp bucket as gzipped CSV for matching. If input_subfolder is specific, the file will be uploaded to a subfolder of the @@ -197,9 +195,7 @@ def upload( # upload table to s3 temp location sftp_file_path = self.load_table_to_sftp(table, input_subfolder) - sftp_file_path_encoded = base64.b64encode( - sftp_file_path.encode("ascii") - ).decode("ascii") + sftp_file_path_encoded = base64.b64encode(sftp_file_path.encode("ascii")).decode("ascii") if export: action = "export%2Cpublish" @@ -223,9 +219,7 @@ def upload( endpoint = "/".join(endpoint_params) # Assemble query parameters - query_params: Dict[str, Union[str, int]] = { - "token": self.connection.token["access_token"] - } + query_params: Dict[str, Union[str, int]] = {"token": self.connection.token["access_token"]} if copy_to_sandbox: query_params["copyToSandbox"] = "true" if static_values: @@ -362,9 +356,7 @@ def load_matches(self, id: str) -> Table: raise RuntimeError(err_msg) remote_filepaths = self.sftp.list_directory("/myDownloads/") - remote_filename = [filename for filename in remote_filepaths if id in filename][ - 0 - ] + remote_filename = [filename for filename in remote_filepaths if id in filename][0] remote_filepath = "/myDownloads/" + remote_filename temp_file_zip = self.sftp.get_file(remote_filepath) temp_dir = tempfile.mkdtemp() @@ -418,8 +410,6 @@ def validate_table(self, table: Table, template_id: str = "48827") -> None: errors["missing_required_columns"] = missing_required_columns if errors: - raise ValueError( - "Input table does not have the right structure. %s", errors - ) + raise ValueError("Input table does not have the right structure. 
%s", errors) else: logger.info("Table structure validated.") diff --git a/parsons/census/census.py b/parsons/census/census.py index 0be254d7a8..0939ecc749 100644 --- a/parsons/census/census.py +++ b/parsons/census/census.py @@ -22,9 +22,7 @@ def __init__(self, api_key=None): self.api_key = check_env.check("CENSUS_API_KEY", api_key) self.host = "https://api.census.gov/data" - def get_census( - self, year=None, dataset_acronym=None, variables=None, location=None - ): + def get_census(self, year=None, dataset_acronym=None, variables=None, location=None): """ Pull census data. For background check out the `Census API Guide `_ @@ -50,9 +48,7 @@ def get_census( usr_key = f"&key={self.api_key}" year = str(year) # in case someone passes int location = "&for=" + location - query_url = ( - f"{self.host}/{year}{dataset_acronym}{g}{variables}{location}{usr_key}" - ) + query_url = f"{self.host}/{year}{dataset_acronym}{g}{variables}{location}{usr_key}" # create connector connector = APIConnector(uri=self.host) diff --git a/parsons/civis/civisclient.py b/parsons/civis/civisclient.py index 41519182a4..897bc74a0f 100644 --- a/parsons/civis/civisclient.py +++ b/parsons/civis/civisclient.py @@ -30,9 +30,7 @@ def __init__(self, db=None, api_key=None, **kwargs): can be found by reading the Civis API client `documentation `_. """ # noqa: E501 - def query( - self, sql, preview_rows=10, polling_interval=None, hidden=True, wait=True - ): + def query(self, sql, preview_rows=10, polling_interval=None, hidden=True, wait=True): """ Execute a SQL statement as a Civis query. @@ -89,7 +87,7 @@ def table_import( sortkey1=None, sortkey2=None, wait=True, - **civisargs + **civisargs, ): """ Write the table to a Civis Redshift cluster. Additional key word @@ -135,7 +133,7 @@ def table_import( sortkey1=sortkey1, sortkey2=sortkey2, headers=True, - **civisargs + **civisargs, ) if wait: diff --git a/parsons/copper/copper.py b/parsons/copper/copper.py index b81aa23e83..cae7f8f55b 100644 --- a/parsons/copper/copper.py +++ b/parsons/copper/copper.py @@ -202,9 +202,7 @@ def get_standard_object(self, object_name, filters=None, tidy=False): # Retrieve and process a standard endpoint object (e.g. people, companies, etc.) 
logger.info(f"Retrieving {object_name} records.") - blob = self.paginate_request( - f"/{object_name}/search", req_type="POST", filters=filters - ) + blob = self.paginate_request(f"/{object_name}/search", req_type="POST", filters=filters) return self.process_json(blob, object_name, tidy=tidy) @@ -320,14 +318,10 @@ def process_json(self, json_blob, obj_type, tidy=False): logger.debug(p, "needs to be unpacked into rows") # Determine whether or not to expand based on tidy - unpacked_tidy = obj_table.unpack_nested_columns_as_rows( - p, expand_original=tidy - ) + unpacked_tidy = obj_table.unpack_nested_columns_as_rows(p, expand_original=tidy) # Check if column was removed as sign it was unpacked into separate table if p not in obj_table.columns: - table_list.append( - {"name": f"{obj_type}_{p}", "tbl": unpacked_tidy} - ) + table_list.append({"name": f"{obj_type}_{p}", "tbl": unpacked_tidy}) else: obj_table = unpacked_tidy diff --git a/parsons/crowdtangle/crowdtangle.py b/parsons/crowdtangle/crowdtangle.py index 1f5841c9d7..acc602aa6b 100644 --- a/parsons/crowdtangle/crowdtangle.py +++ b/parsons/crowdtangle/crowdtangle.py @@ -177,9 +177,7 @@ def get_posts( self._unpack(pt) return pt - def get_leaderboard( - self, start_date=None, end_date=None, list_ids=None, account_ids=None - ): + def get_leaderboard(self, start_date=None, end_date=None, list_ids=None, account_ids=None): """ Return leaderboard data. @@ -219,9 +217,7 @@ def get_leaderboard( self._unpack(pt) return pt - def get_links( - self, link, start_date=None, end_date=None, include_summary=None, platforms=None - ): + def get_links(self, link, start_date=None, end_date=None, include_summary=None, platforms=None): """ Return up to 100 posts based on a specific link. It is strongly recommended to use the ``start_date`` parameter to limit queries to relevant dates. 
diff --git a/parsons/databases/database/database.py b/parsons/databases/database/database.py index bfec754241..843d375c49 100644 --- a/parsons/databases/database/database.py +++ b/parsons/databases/database/database.py @@ -101,11 +101,7 @@ def is_valid_sql_num(self, val): # then it's a valid sql number # Also check the first character is not zero try: - if ( - (float(val) or 1) - and "_" not in val - and (val in ("0", "0.0") or val[0] != "0") - ): + if (float(val) or 1) and "_" not in val and (val in ("0", "0.0") or val[0] != "0"): return True else: return False @@ -156,9 +152,7 @@ def detect_data_type(self, value, cmp_type=None): # The value is very likely an int # let's get its size # If the compare types are empty and use the types of the current value - elif isinstance(value, int) and cmp_type in ( - self.INT_TYPES + [None, "", self.BOOL] - ): + elif isinstance(value, int) and cmp_type in (self.INT_TYPES + [None, "", self.BOOL]): # Use smallest possible int type above TINYINT if self.SMALLINT_MIN < value < self.SMALLINT_MAX: result = self.get_bigger_int(self.SMALLINT, cmp_type) diff --git a/parsons/databases/db_sync.py b/parsons/databases/db_sync.py index aeb93f40a3..374d319f49 100644 --- a/parsons/databases/db_sync.py +++ b/parsons/databases/db_sync.py @@ -82,9 +82,7 @@ def table_sync_full( source_tbl = self.source_db.table(source_table) destination_tbl = self.dest_db.table(destination_table) - logger.info( - f"Syncing full table data from {source_table} to {destination_table}" - ) + logger.info(f"Syncing full table data from {source_table} to {destination_table}") # Drop or truncate if the destination table exists if destination_tbl.exists: @@ -101,17 +99,13 @@ def table_sync_full( logger.info(f"needed to drop {destination_tbl}...") destination_tbl.drop() else: - raise ValueError( - "Invalid if_exists argument. Must be drop or truncate." - ) + raise ValueError("Invalid if_exists argument. Must be drop or truncate.") # Create the table, if needed. if not destination_tbl.exists: self.create_table(source_table, destination_table) - copied_rows = self.copy_rows( - source_table, destination_table, None, order_by, **kwargs - ) + copied_rows = self.copy_rows(source_table, destination_table, None, order_by, **kwargs) if verify_row_count: self._row_count_verify(source_tbl, destination_tbl) @@ -162,9 +156,7 @@ def table_sync_incremental( "Destination tables %s does not exist, running a full sync", destination_table, ) - self.table_sync_full( - source_table, destination_table, order_by=primary_key, **kwargs - ) + self.table_sync_full(source_table, destination_table, order_by=primary_key, **kwargs) return # Check that the source table primary key is distinct @@ -193,9 +185,7 @@ def table_sync_incremental( # Check for a mismatch in row counts; if dest_max_pk is None, or destination is empty # and we don't have to worry about this check. if dest_max_pk is not None and dest_max_pk > source_max_pk: - raise ValueError( - "Destination DB primary key greater than source DB primary key." - ) + raise ValueError("Destination DB primary key greater than source DB primary key.") # Do not copied if row counts are equal. elif dest_max_pk == source_max_pk: @@ -214,9 +204,7 @@ def table_sync_incremental( logger.info(f"{source_table} synced to {destination_table}.") - def copy_rows( - self, source_table_name, destination_table_name, cutoff, order_by, **kwargs - ): + def copy_rows(self, source_table_name, destination_table_name, cutoff, order_by, **kwargs): """ Copy the rows from the source to the destination. 
@@ -298,12 +286,8 @@ def copy_rows( # If our buffer reaches our write threshold, write it out if rows_buffered >= self.write_chunk_size: - logger.debug( - "Copying %s rows to %s", rows_buffered, destination_table_name - ) - self.dest_db.copy( - buffer, destination_table_name, if_exists="append", **kwargs - ) + logger.debug("Copying %s rows to %s", rows_buffered, destination_table_name) + self.dest_db.copy(buffer, destination_table_name, if_exists="append", **kwargs) total_rows_written += rows_buffered # Reset the buffer diff --git a/parsons/databases/mysql/mysql.py b/parsons/databases/mysql/mysql.py index 3572d82bce..d3dddd0627 100644 --- a/parsons/databases/mysql/mysql.py +++ b/parsons/databases/mysql/mysql.py @@ -232,9 +232,7 @@ def copy( with self.connection() as connection: # Create table if not exists if self._create_table_precheck(connection, table_name, if_exists): - sql = self.create_statement( - tbl, table_name, strict_length=strict_length - ) + sql = self.create_statement(tbl, table_name, strict_length=strict_length) self.query_with_connection(sql, connection, commit=False) logger.info(f"Table {table_name} created.") diff --git a/parsons/databases/postgres/postgres.py b/parsons/databases/postgres/postgres.py index 1463ec85cd..fc1c8c1642 100644 --- a/parsons/databases/postgres/postgres.py +++ b/parsons/databases/postgres/postgres.py @@ -30,9 +30,7 @@ class Postgres(PostgresCore, Alchemy, DatabaseConnector): Seconds to timeout if connection not established. """ - def __init__( - self, username=None, password=None, host=None, db=None, port=5432, timeout=10 - ): + def __init__(self, username=None, password=None, host=None, db=None, port=5432, timeout=10): super().__init__() self.username = username or os.environ.get("PGUSER") @@ -84,9 +82,7 @@ def copy( if self._create_table_precheck(connection, table_name, if_exists): # Create the table # To Do: Pass in the advanced configuration parameters. - sql = self.create_statement( - tbl, table_name, strict_length=strict_length - ) + sql = self.create_statement(tbl, table_name, strict_length=strict_length) self.query_with_connection(sql, connection, commit=False) logger.info(f"{table_name} created.") diff --git a/parsons/databases/postgres/postgres_create_statement.py b/parsons/databases/postgres/postgres_create_statement.py index 7fa4cd0a41..1e5f3b8d78 100644 --- a/parsons/databases/postgres/postgres_create_statement.py +++ b/parsons/databases/postgres/postgres_create_statement.py @@ -158,9 +158,7 @@ def vc_max(self, mapping, columns): def vc_trunc(self, mapping): - return [ - self.VARCHAR_MAX if c > self.VARCHAR_MAX else c for c in mapping["longest"] - ] + return [self.VARCHAR_MAX if c > self.VARCHAR_MAX else c for c in mapping["longest"]] def vc_validate(self, mapping): diff --git a/parsons/databases/redshift/redshift.py b/parsons/databases/redshift/redshift.py index a68498579d..d9023fd6a5 100644 --- a/parsons/databases/redshift/redshift.py +++ b/parsons/databases/redshift/redshift.py @@ -95,9 +95,7 @@ def __init__( self.db = db or os.environ["REDSHIFT_DB"] self.port = port or os.environ["REDSHIFT_PORT"] except KeyError as error: - logger.error( - "Connection info missing. Most include as kwarg or " "env variable." - ) + logger.error("Connection info missing. 
Most include as kwarg or " "env variable.") raise error self.timeout = timeout @@ -682,9 +680,7 @@ def copy( } # Copy from S3 to Redshift - sql = self.copy_statement( - table_name, self.s3_temp_bucket, key, **copy_args - ) + sql = self.copy_statement(table_name, self.s3_temp_bucket, key, **copy_args) sql_censored = sql_helpers.redact_credentials(sql) logger.debug(f"Copy SQL command: {sql_censored}") @@ -1153,9 +1149,7 @@ def drop_dependencies_for_cols(self, schema, table, cols): tbl = self.query_with_connection(sql_depend, connection) dropped_views = [row["table_name"] for row in tbl] if dropped_views: - sql_drop = "\n".join( - [f"drop view {view} CASCADE;" for view in dropped_views] - ) + sql_drop = "\n".join([f"drop view {view} CASCADE;" for view in dropped_views]) tbl = self.query_with_connection(sql_drop, connection) logger.info(f"Dropped the following views: {dropped_views}") @@ -1186,9 +1180,7 @@ def alter_varchar_column_widths(self, tbl, table_name, drop_dependencies=False): s, t = self.split_full_table_name(table_name) cols = self.get_columns(s, t) rc = { - k: v["max_length"] - for k, v in cols.items() - if v["data_type"] == "character varying" + k: v["max_length"] for k, v in cols.items() if v["data_type"] == "character varying" } # noqa: E501, E261 # Figure out if any of the destination table varchar columns are smaller than the @@ -1204,13 +1196,9 @@ def alter_varchar_column_widths(self, tbl, table_name, drop_dependencies=False): new_size = pc[c] if drop_dependencies: self.drop_dependencies_for_cols(s, t, [c]) - self.alter_table_column_type( - table_name, c, "varchar", varchar_width=new_size - ) + self.alter_table_column_type(table_name, c, "varchar", varchar_width=new_size) - def alter_table_column_type( - self, table_name, column_name, data_type, varchar_width=None - ): + def alter_table_column_type(self, table_name, column_name, data_type, varchar_width=None): """ Alter a column type of an existing table. 
diff --git a/parsons/databases/redshift/rs_copy_table.py b/parsons/databases/redshift/rs_copy_table.py index 7b4fa578b7..a53bb229ee 100644 --- a/parsons/databases/redshift/rs_copy_table.py +++ b/parsons/databases/redshift/rs_copy_table.py @@ -60,9 +60,7 @@ def copy_statement( sql += "manifest \n" if bucket_region: sql += f"region '{bucket_region}'\n" - logger.info( - "Copying data from S3 bucket %s in region %s", bucket, bucket_region - ) + logger.info("Copying data from S3 bucket %s in region %s", bucket, bucket_region) sql += f"maxerror {max_errors} \n" # Redshift has some default behavior when statupdate is left out @@ -126,9 +124,7 @@ def get_creds(self, aws_access_key_id, aws_secret_access_key): aws_access_key_id = self.aws_access_key_id aws_secret_access_key = self.aws_secret_access_key - elif ( - "AWS_ACCESS_KEY_ID" in os.environ and "AWS_SECRET_ACCESS_KEY" in os.environ - ): + elif "AWS_ACCESS_KEY_ID" in os.environ and "AWS_SECRET_ACCESS_KEY" in os.environ: aws_access_key_id = os.environ["AWS_ACCESS_KEY_ID"] aws_secret_access_key = os.environ["AWS_SECRET_ACCESS_KEY"] diff --git a/parsons/databases/redshift/rs_create_table.py b/parsons/databases/redshift/rs_create_table.py index 7d37a81240..cce1894bdb 100644 --- a/parsons/databases/redshift/rs_create_table.py +++ b/parsons/databases/redshift/rs_create_table.py @@ -154,9 +154,7 @@ def vc_max(self, mapping, columns): return mapping["longest"] def vc_trunc(self, mapping): - return [ - self.VARCHAR_MAX if c > self.VARCHAR_MAX else c for c in mapping["longest"] - ] + return [self.VARCHAR_MAX if c > self.VARCHAR_MAX else c for c in mapping["longest"]] def vc_validate(self, mapping): return [1 if c == 0 else c for c in mapping["longest"]] @@ -220,9 +218,7 @@ def _log_key_warning(distkey=None, sortkey=None, method=""): warning = "".join( [ "You didn't provide a {} key to method `parsons.redshift.Redshift.{}`.\n" - "You can learn about best practices here:\n{}.\n".format( - keyname, method, keyinfo - ) + "You can learn about best practices here:\n{}.\n".format(keyname, method, keyinfo) for key, keyname, keyinfo in keys if not key ] diff --git a/parsons/databases/redshift/rs_table_utilities.py b/parsons/databases/redshift/rs_table_utilities.py index 50457e3166..fe1c1e4c2b 100644 --- a/parsons/databases/redshift/rs_table_utilities.py +++ b/parsons/databases/redshift/rs_table_utilities.py @@ -205,9 +205,7 @@ def populate_table_from_query( The column to use as the sortkey for the table. """ with self.connection() as conn: - should_create = self._create_table_precheck( - conn, destination_table, if_exists - ) + should_create = self._create_table_precheck(conn, destination_table, if_exists) if should_create: logger.info(f"Creating table {destination_table} from query...") @@ -252,9 +250,7 @@ def duplicate_table( """ with self.connection() as conn: - should_create = self._create_table_precheck( - conn, destination_table, if_exists - ) + should_create = self._create_table_precheck(conn, destination_table, if_exists) if should_create: logger.info(f"Creating {destination_table} from {source_table}...") @@ -428,11 +424,7 @@ def get_columns_list(self, schema, table_name): `Returns:` A list of column names. 
""" - schema = ( - f'"{schema}"' - if not (schema.startswith('"') and schema.endswith('"')) - else schema - ) + schema = f'"{schema}"' if not (schema.startswith('"') and schema.endswith('"')) else schema table_name = ( f'"{table_name}"' @@ -540,9 +532,7 @@ def get_max_value(self, table_name, value_column): The column containing the values """ - return self.query(f"SELECT MAX({value_column}) value from {table_name}")[0][ - "value" - ] + return self.query(f"SELECT MAX({value_column}) value from {table_name}")[0]["value"] def get_object_type(self, object_name): """ diff --git a/parsons/donorbox/donorbox.py b/parsons/donorbox/donorbox.py index 917a0ba156..f35ae55295 100644 --- a/parsons/donorbox/donorbox.py +++ b/parsons/donorbox/donorbox.py @@ -140,9 +140,7 @@ def get_donors(self, **kwargs): Parsons Table """ if "donor_id" in kwargs: - kwargs["id"] = kwargs.pop( - "donor_id" - ) # switch to Donorbox's (less specific) name + kwargs["id"] = kwargs.pop("donor_id") # switch to Donorbox's (less specific) name data = self.client.get_request("donors", params=kwargs) return Table(data) diff --git a/parsons/etl/etl.py b/parsons/etl/etl.py index 6c91deeb4d..25c696f192 100644 --- a/parsons/etl/etl.py +++ b/parsons/etl/etl.py @@ -431,10 +431,7 @@ def get_columns_type_stats(self): A list of dicts, each containing a column 'name' and a 'type' list """ - return [ - {"name": col, "type": self.get_column_types(col)} - for col in self.table.columns() - ] + return [{"name": col, "type": self.get_column_types(col)} for col in self.table.columns()] def convert_table(self, *args): """ @@ -608,11 +605,7 @@ def unpack_nested_columns_as_rows(self, column, key="id", expand_original=False) """ if isinstance(expand_original, int) and expand_original is not True: - lengths = { - len(row[column]) - for row in self - if isinstance(row[column], (dict, list)) - } + lengths = {len(row[column]) for row in self if isinstance(row[column], (dict, list))} max_len = sorted(lengths, reverse=True)[0] if max_len > expand_original: expand_original = False @@ -624,9 +617,7 @@ def unpack_nested_columns_as_rows(self, column, key="id", expand_original=False) else: # Otherwise, include only key and column, but keep all non-dict types in table_list table = self.cut(key, column) - table_list = table.select_rows( - lambda row: not isinstance(row[column], dict) - ) + table_list = table.select_rows(lambda row: not isinstance(row[column], dict)) # All the columns other than column to ignore while melting ignore_cols = table.columns @@ -663,17 +654,13 @@ def unpack_nested_columns_as_rows(self, column, key="id", expand_original=False) if expand_original: # Add unpacked rows to the original table (minus packed rows) - orig = self.select_rows( - lambda row: not isinstance(row[column], (dict, list)) - ) + orig = self.select_rows(lambda row: not isinstance(row[column], (dict, list))) orig.concat(melted_list) # Add unique id column by hashing all the other fields if "uid" not in self.columns: orig.add_column( "uid", - lambda row: hashlib.md5( - str.encode("".join([str(x) for x in row])) - ).hexdigest(), + lambda row: hashlib.md5(str.encode("".join([str(x) for x in row]))).hexdigest(), ) orig.move_column("uid", 0) @@ -688,9 +675,7 @@ def unpack_nested_columns_as_rows(self, column, key="id", expand_original=False) # Add unique id column by hashing all the other fields melted_list.add_column( "uid", - lambda row: hashlib.md5( - str.encode("".join([str(x) for x in row])) - ).hexdigest(), + lambda row: hashlib.md5(str.encode("".join([str(x) for x in 
row]))).hexdigest(), ) melted_list.move_column("uid", 0) output = melted_list @@ -922,8 +907,7 @@ def chunk(self, rows): from parsons.etl import Table return [ - Table(petl.rowslice(self.table, i, i + rows)) - for i in range(0, self.num_rows, rows) + Table(petl.rowslice(self.table, i, i + rows)) for i in range(0, self.num_rows, rows) ] @staticmethod @@ -973,14 +957,10 @@ def match_columns( from parsons.etl import Table # Just trying to avoid recursive imports. - normalize_fn = ( - Table.get_normalized_column_name if fuzzy_match else (lambda s: s) - ) + normalize_fn = Table.get_normalized_column_name if fuzzy_match else (lambda s: s) # Create a mapping of our "normalized" name to the original column name - current_columns_normalized = { - normalize_fn(col): col for col in reversed(self.columns) - } + current_columns_normalized = {normalize_fn(col): col for col in reversed(self.columns)} # Track any columns we need to add to our current table from our desired columns columns_to_add = [] @@ -1011,8 +991,7 @@ def match_columns( elif if_missing_columns != "ignore": # If it's not ignore, add, or fail, then it's not a valid strategy raise TypeError( - f"Invalid option {if_missing_columns} for " - "argument `if_missing_columns`" + f"Invalid option {if_missing_columns} for " "argument `if_missing_columns`" ) else: # We have found this in our current columns, so take it out of our list to search @@ -1036,8 +1015,7 @@ def match_columns( elif if_extra_columns != "remove": # If it's not ignore, add, or fail, then it's not a valid strategy raise TypeError( - f"Invalid option {if_extra_columns} for " - "argument `if_extra_columns`" + f"Invalid option {if_extra_columns} for " "argument `if_extra_columns`" ) # Add any columns we need to add @@ -1307,9 +1285,7 @@ def deduplicate(self, keys=None, presorted=False): """ - deduped = petl.transform.dedup.distinct( - self.table, key=keys, presorted=presorted - ) + deduped = petl.transform.dedup.distinct(self.table, key=keys, presorted=presorted) self.table = deduped return self diff --git a/parsons/etl/tofrom.py b/parsons/etl/tofrom.py index c87a3cdb12..ce2a41840e 100644 --- a/parsons/etl/tofrom.py +++ b/parsons/etl/tofrom.py @@ -194,9 +194,7 @@ def append_csv(self, local_path, encoding=None, errors="strict", **csvargs): The path of the file """ # noqa: W605 - petl.appendcsv( - self.table, source=local_path, encoding=encoding, errors=errors, **csvargs - ) + petl.appendcsv(self.table, source=local_path, encoding=encoding, errors=errors, **csvargs) return local_path def to_zip_csv( @@ -246,22 +244,14 @@ def to_zip_csv( if not archive_path: archive_path = files.create_temp_file(suffix=".zip") - cf = self.to_csv( - encoding=encoding, errors=errors, write_header=write_header, **csvargs - ) + cf = self.to_csv(encoding=encoding, errors=errors, write_header=write_header, **csvargs) if not csv_name: - csv_name = ( - files.extract_file_name(archive_path, include_suffix=False) + ".csv" - ) + csv_name = files.extract_file_name(archive_path, include_suffix=False) + ".csv" - return zip_archive.create_archive( - archive_path, cf, file_name=csv_name, if_exists=if_exists - ) + return zip_archive.create_archive(archive_path, cf, file_name=csv_name, if_exists=if_exists) - def to_json( - self, local_path=None, temp_file_compression=None, line_delimited=False - ): + def to_json(self, local_path=None, temp_file_compression=None, line_delimited=False): """ Outputs table to a JSON file @@ -817,9 +807,7 @@ def from_json(cls, local_path, header=None, line_delimited=False): return 
cls(petl.fromjson(local_path, header=header)) @classmethod - def from_redshift( - cls, sql, username=None, password=None, host=None, db=None, port=None - ): + def from_redshift(cls, sql, username=None, password=None, host=None, db=None, port=None): """ Create a ``parsons table`` from a Redshift query. @@ -850,9 +838,7 @@ def from_redshift( return rs.query(sql) @classmethod - def from_postgres( - cls, sql, username=None, password=None, host=None, db=None, port=None - ): + def from_postgres(cls, sql, username=None, password=None, host=None, db=None, port=None): """ Args: sql: str diff --git a/parsons/facebook_ads/facebook_ads.py b/parsons/facebook_ads/facebook_ads.py index 0733cfed8b..42353bf578 100644 --- a/parsons/facebook_ads/facebook_ads.py +++ b/parsons/facebook_ads/facebook_ads.py @@ -74,9 +74,7 @@ class FacebookAds(object): "DOB YYYYMMDD": ["dob", "vb_voterbase_dob", "vb_tsmart_dob"] } - def __init__( - self, app_id=None, app_secret=None, access_token=None, ad_account_id=None - ): + def __init__(self, app_id=None, app_secret=None, access_token=None, ad_account_id=None): try: self.app_id = app_id or os.environ["FB_APP_ID"] @@ -85,8 +83,7 @@ def __init__( self.ad_account_id = ad_account_id or os.environ["FB_AD_ACCOUNT_ID"] except KeyError as error: logger.error( - "FB Marketing API credentials missing. Must be specified as env vars " - "or kwargs" + "FB Marketing API credentials missing. Must be specified as env vars " "or kwargs" ) raise error @@ -120,15 +117,9 @@ def _preprocess_dob_column(table, column): # Parse the DOB column into 3 new columns, and remove the original column # TODO Throw an error if the values are not 6 characters long? - table.add_column( - FBKeySchema.doby, lambda row: row[column][:4] if row[column] else None - ) - table.add_column( - FBKeySchema.dobm, lambda row: row[column][4:6] if row[column] else None - ) - table.add_column( - FBKeySchema.dobd, lambda row: row[column][6:8] if row[column] else None - ) + table.add_column(FBKeySchema.doby, lambda row: row[column][:4] if row[column] else None) + table.add_column(FBKeySchema.dobm, lambda row: row[column][4:6] if row[column] else None) + table.add_column(FBKeySchema.dobd, lambda row: row[column][6:8] if row[column] else None) table.remove_column(column) @staticmethod @@ -281,9 +272,7 @@ def _add_batch_to_custom_audience( # Note that the FB SDK handles basic normalization and hashing of the data CustomAudience(audience_id).add_users(schema, batch, is_raw=True) - logger.info( - f"Added {added_so_far+len(batch)}/{total_rows} users to custom audience..." 
- ) + logger.info(f"Added {added_so_far+len(batch)}/{total_rows} users to custom audience...") def add_users_to_custom_audience(self, audience_id, users_table): """ @@ -361,8 +350,7 @@ def add_users_to_custom_audience(self, audience_id, users_table): """ # noqa: E501,E261 logger.info( - f"Adding custom audience users from provided table with " - f"{users_table.num_rows} rows" + f"Adding custom audience users from provided table with " f"{users_table.num_rows} rows" ) match_table = FacebookAds.get_match_table_for_users_table(users_table) diff --git a/parsons/formstack/formstack.py b/parsons/formstack/formstack.py index 45d4e8c6e3..94481b485a 100644 --- a/parsons/formstack/formstack.py +++ b/parsons/formstack/formstack.py @@ -114,9 +114,7 @@ def get_folders(self) -> Table: tbl.remove_column("subfolders") return tbl - def get_forms( - self, form_name: Optional[str] = None, folder_id: Optional[int] = None - ) -> Table: + def get_forms(self, form_name: Optional[str] = None, folder_id: Optional[int] = None) -> Table: """ Get all forms on your account. diff --git a/parsons/github/github.py b/parsons/github/github.py index 95f4ee3315..2487e468c3 100644 --- a/parsons/github/github.py +++ b/parsons/github/github.py @@ -77,9 +77,7 @@ def __init__(self, username=None, password=None, access_token=None): self.username = check_env.check("GITHUB_USERNAME", username, optional=True) self.password = check_env.check("GITHUB_PASSWORD", password, optional=True) - self.access_token = check_env.check( - "GITHUB_ACCESS_TOKEN", access_token, optional=True - ) + self.access_token = check_env.check("GITHUB_ACCESS_TOKEN", access_token, optional=True) if self.username and self.password: self.client = PyGithub(self.username, self.password) @@ -195,9 +193,7 @@ def list_organization_repos(self, organization_name, page=None, page_size=100): Table with page of organization repos """ - logger.info( - f"Listing page {page} of repos for organization {organization_name}" - ) + logger.info(f"Listing page {page} of repos for organization {organization_name}") return self._as_table( self.client.get_organization(organization_name).get_repos(), @@ -406,9 +402,7 @@ def download_file(self, repo_name, path, branch=None, local_path=None): if branch is None: branch = repo.default_branch - logger.info( - f"Downloading {path} from {repo_name}, branch {branch} to {local_path}" - ) + logger.info(f"Downloading {path} from {repo_name}, branch {branch} to {local_path}") headers = None if self.access_token: @@ -435,9 +429,7 @@ def download_file(self, repo_name, path, branch=None, local_path=None): return local_path - def download_table( - self, repo_name, path, branch=None, local_path=None, delimiter="," - ): + def download_table(self, repo_name, path, branch=None, local_path=None, delimiter=","): """Download a CSV file from a repo by path and branch as a Parsons Table. Args: diff --git a/parsons/google/google_admin.py b/parsons/google/google_admin.py index c1141d4b9d..cf5c730929 100644 --- a/parsons/google/google_admin.py +++ b/parsons/google/google_admin.py @@ -48,9 +48,7 @@ def _paginate_request(self, endpoint, collection, params=None): # Return type from Google Admin is a tuple of length 2. 
Extract desired result from 2nd item # in tuple and convert to json - res = json.loads( - self.client.request(req_url + param_str, "GET")[1].decode("utf-8") - ) + res = json.loads(self.client.request(req_url + param_str, "GET")[1].decode("utf-8")) # Paginate ret = [] @@ -63,9 +61,9 @@ def _paginate_request(self, endpoint, collection, params=None): else: param_arr[-1] = "pageToken=" + res["nextPageToken"] res = json.loads( - self.client.request(req_url + "?" + "&".join(param_arr), "GET")[ - 1 - ].decode("utf-8") + self.client.request(req_url + "?" + "&".join(param_arr), "GET")[1].decode( + "utf-8" + ) ) ret += res[collection] @@ -84,9 +82,7 @@ def get_aliases(self, group_key, params=None): `Returns:` Table Class """ - return self._paginate_request( - "groups/" + group_key + "/aliases", "aliases", params - ) + return self._paginate_request("groups/" + group_key + "/aliases", "aliases", params) def get_all_group_members(self, group_key, params=None): """ @@ -101,9 +97,7 @@ def get_all_group_members(self, group_key, params=None): `Returns:` Table Class """ - return self._paginate_request( - "groups/" + group_key + "/members", "members", params - ) + return self._paginate_request("groups/" + group_key + "/members", "members", params) def get_all_groups(self, params=None): """ diff --git a/parsons/google/google_bigquery.py b/parsons/google/google_bigquery.py index c020ca684e..83f0f905fb 100644 --- a/parsons/google/google_bigquery.py +++ b/parsons/google/google_bigquery.py @@ -603,9 +603,7 @@ def copy_large_compressed_file_from_gcs( compression_type=compression_type, ) - logger.debug( - f"Loading uncompressed uri into BigQuery {uncompressed_gcs_uri}..." - ) + logger.debug(f"Loading uncompressed uri into BigQuery {uncompressed_gcs_uri}...") table_ref = self.get_table_ref(table_name=table_name) return self._load_table_from_uri( source_uris=uncompressed_gcs_uri, @@ -616,9 +614,7 @@ def copy_large_compressed_file_from_gcs( finally: if uncompressed_gcs_uri: - new_bucket_name, new_blob_name = gcs.split_uri( - gcs_uri=uncompressed_gcs_uri - ) + new_bucket_name, new_blob_name = gcs.split_uri(gcs_uri=uncompressed_gcs_uri) gcs.delete_blob(new_bucket_name, new_blob_name) logger.debug("Successfully dropped uncompressed blob") @@ -701,9 +697,7 @@ def copy_s3( aws_s3_key=key, ) temp_blob_name = key - temp_blob_uri = gcs_client.format_uri( - bucket=tmp_gcs_bucket, name=temp_blob_name - ) + temp_blob_uri = gcs_client.format_uri(bucket=tmp_gcs_bucket, name=temp_blob_name) # load CSV from Cloud Storage into BigQuery try: @@ -801,13 +795,9 @@ def copy( schema = [] for column in tbl.columns: try: - schema_row = [ - i for i in job_config.schema if i.name.lower() == column.lower() - ][0] + schema_row = [i for i in job_config.schema if i.name.lower() == column.lower()][0] except IndexError: - raise IndexError( - f"Column found in Table that was not found in schema: {column}" - ) + raise IndexError(f"Column found in Table that was not found in schema: {column}") schema.append(schema_row) job_config.schema = schema @@ -1324,9 +1314,7 @@ def _validate_copy_inputs(self, if_exists: str, data_type: str): '"append", "drop", "truncate", or "fail"' ) if data_type not in ["csv", "json"]: - raise ValueError( - f"Only supports csv or json files [data_type = {data_type}]" - ) + raise ValueError(f"Only supports csv or json files [data_type = {data_type}]") def _load_table_from_uri(self, source_uris, destination, job_config, **load_kwargs): try: @@ -1341,9 +1329,7 @@ def _load_table_from_uri(self, source_uris, destination, 
job_config, **load_kwar except exceptions.BadRequest as e: for idx, error_ in enumerate(load_job.errors): if idx == 0: - logger.error( - "* Load job failed. Enumerating errors collection below:" - ) + logger.error("* Load job failed. Enumerating errors collection below:") logger.error(f"** Error collection - index {idx}:") logger.error(error_) diff --git a/parsons/google/google_cloud_storage.py b/parsons/google/google_cloud_storage.py index 5b64eaae48..9c64693f41 100644 --- a/parsons/google/google_cloud_storage.py +++ b/parsons/google/google_cloud_storage.py @@ -288,9 +288,7 @@ def delete_blob(self, bucket_name, blob_name): blob.delete() logger.info(f"{blob_name} blob in {bucket_name} bucket deleted.") - def upload_table( - self, table, bucket_name, blob_name, data_type="csv", default_acl=None - ): + def upload_table(self, table, bucket_name, blob_name, data_type="csv", default_acl=None): """ Load the data from a Parsons table into a blob. @@ -319,9 +317,7 @@ def upload_table( local_file = table.to_json() content_type = "application/json" else: - raise ValueError( - f"Unknown data_type value ({data_type}): must be one of: csv or json" - ) + raise ValueError(f"Unknown data_type value ({data_type}): must be one of: csv or json") try: blob.upload_from_filename( @@ -391,9 +387,7 @@ def copy_bucket_to_gcs( Secret key to authenticate storage transfer """ if source not in ["gcs", "s3"]: - raise ValueError( - f"Blob transfer only supports gcs and s3 sources [source={source}]" - ) + raise ValueError(f"Blob transfer only supports gcs and s3 sources [source={source}]") if source_path and source_path[-1] != "/": raise ValueError("Source path much end in a '/'") @@ -580,13 +574,9 @@ def unzip_blob( } file_extension = compression_params[compression_type]["file_extension"] - compression_function = compression_params[compression_type][ - "compression_function" - ] + compression_function = compression_params[compression_type]["compression_function"] - compressed_filepath = self.download_blob( - bucket_name=bucket_name, blob_name=blob_name - ) + compressed_filepath = self.download_blob(bucket_name=bucket_name, blob_name=blob_name) decompressed_filepath = compressed_filepath.replace(file_extension, "") decompressed_blob_name = ( @@ -618,9 +608,7 @@ def __gzip_decompress_and_write_to_gcs(self, **kwargs): bucket_name = kwargs.pop("bucket_name") with gzip.open(compressed_filepath, "rb") as f_in: - logger.debug( - f"Uploading uncompressed file to GCS: {decompressed_blob_name}" - ) + logger.debug(f"Uploading uncompressed file to GCS: {decompressed_blob_name}") bucket = self.get_bucket(bucket_name=bucket_name) blob = storage.Blob(name=decompressed_blob_name, bucket=bucket) blob.upload_from_file(file_obj=f_in, rewind=True, timeout=3600) @@ -640,9 +628,7 @@ def __zip_decompress_and_write_to_gcs(self, **kwargs): with zipfile.ZipFile(compressed_filepath) as path_: # Open the underlying file with path_.open(decompressed_blob_in_archive) as f_in: - logger.debug( - f"Uploading uncompressed file to GCS: {decompressed_blob_name}" - ) + logger.debug(f"Uploading uncompressed file to GCS: {decompressed_blob_name}") bucket = self.get_bucket(bucket_name=bucket_name) blob = storage.Blob(name=decompressed_blob_name, bucket=bucket) blob.upload_from_file(file_obj=f_in, rewind=True, timeout=3600) diff --git a/parsons/google/google_sheets.py b/parsons/google/google_sheets.py index 5890763a01..67deaefc1c 100644 --- a/parsons/google/google_sheets.py +++ b/parsons/google/google_sheets.py @@ -32,9 +32,7 @@ def __init__(self, 
google_keyfile_dict=None, subject=None): "https://www.googleapis.com/auth/drive", ] - setup_google_application_credentials( - google_keyfile_dict, "GOOGLE_DRIVE_CREDENTIALS" - ) + setup_google_application_credentials(google_keyfile_dict, "GOOGLE_DRIVE_CREDENTIALS") google_credential_file = open(os.environ["GOOGLE_DRIVE_CREDENTIALS"]) credentials_dict = json.load(google_credential_file) @@ -49,16 +47,12 @@ def _get_worksheet(self, spreadsheet_id, worksheet=0): # Check if the worksheet is an integer, if so find the sheet by index if isinstance(worksheet, int): - return self.gspread_client.open_by_key(spreadsheet_id).get_worksheet( - worksheet - ) + return self.gspread_client.open_by_key(spreadsheet_id).get_worksheet(worksheet) elif isinstance(worksheet, str): idx = self.list_worksheets(spreadsheet_id).index(worksheet) try: - return self.gspread_client.open_by_key(spreadsheet_id).get_worksheet( - idx - ) + return self.gspread_client.open_by_key(spreadsheet_id).get_worksheet(idx) except: # noqa: E722 raise ValueError(f"Couldn't find worksheet {worksheet}") @@ -282,9 +276,7 @@ def append_to_sheet( # If the existing sheet is blank, then just overwrite the table. if existing_table.num_rows == 0: - return self.overwrite_sheet( - spreadsheet_id, table, worksheet, user_entered_value - ) + return self.overwrite_sheet(spreadsheet_id, table, worksheet, user_entered_value) cells = [] for row_num, row in enumerate(table.data): diff --git a/parsons/hustle/hustle.py b/parsons/hustle/hustle.py index 1d07bc3831..70c58e192e 100644 --- a/parsons/hustle/hustle.py +++ b/parsons/hustle/hustle.py @@ -47,9 +47,7 @@ def _get_auth_token(self, client_id, client_secret): logger.debug(r.json()) self.auth_token = r.json()["access_token"] - self.token_expiration = datetime.datetime.now() + datetime.timedelta( - seconds=7200 - ) + self.token_expiration = datetime.datetime.now() + datetime.timedelta(seconds=7200) logger.info("Authentication token generated") def _token_check(self): @@ -66,9 +64,7 @@ def _token_check(self): pass - def _request( - self, endpoint, req_type="GET", args=None, payload=None, raise_on_error=True - ): + def _request(self, endpoint, req_type="GET", args=None, payload=None, raise_on_error=True): url = self.uri + endpoint self._token_check() @@ -155,9 +151,7 @@ def get_agent(self, agent_id): logger.info(f"Got {agent_id} agent.") return r - def create_agent( - self, group_id, name, full_name, phone_number, send_invite=False, email=None - ): + def create_agent(self, group_id, name, full_name, phone_number, send_invite=False, email=None): """ Create an agent. @@ -190,9 +184,7 @@ def create_agent( agent = json_format.remove_empty_keys(agent) logger.info(f"Generating {full_name} agent.") - return self._request( - f"groups/{group_id}/agents", req_type="POST", payload=agent - ) + return self._request(f"groups/{group_id}/agents", req_type="POST", payload=agent) def update_agent(self, agent_id, name=None, full_name=None, send_invite=False): """ @@ -328,9 +320,7 @@ def get_leads(self, organization_id=None, group_id=None): raise ValueError("Either organization_id or group_id required.") if organization_id is not None and group_id is not None: - raise ValueError( - "Only one of organization_id and group_id may be populated." 
- ) + raise ValueError("Only one of organization_id and group_id may be populated.") if organization_id: endpoint = f"organizations/{organization_id}/leads" @@ -463,9 +453,7 @@ def create_leads(self, table, group_id=None): # Group Id check if not group_id and "group_id" not in table.columns: - raise ValueError( - "Group Id must be passed as an argument or a column value." - ) + raise ValueError("Group Id must be passed as an argument or a column value.") if group_id: lead["group_id"] = group_id diff --git a/parsons/mailchimp/mailchimp.py b/parsons/mailchimp/mailchimp.py index 9ddb065093..ee37462e76 100644 --- a/parsons/mailchimp/mailchimp.py +++ b/parsons/mailchimp/mailchimp.py @@ -381,9 +381,7 @@ def get_campaign_emails( "since": since, } - response = self.client.get_request( - f"reports/{campaign_id}/email-activity", params=params - ) + response = self.client.get_request(f"reports/{campaign_id}/email-activity", params=params) tbl = Table(response["emails"]) if tbl.num_rows > 0: return tbl @@ -425,9 +423,7 @@ def get_unsubscribes( "offset": offset, } - response = self.client.get_request( - f"reports/{campaign_id}/unsubscribed", params=params - ) + response = self.client.get_request(f"reports/{campaign_id}/unsubscribed", params=params) tbl = Table(response["unsubscribes"]) logger.info(f"Found {tbl.num_rows} unsubscribes for {campaign_id}.") if tbl.num_rows > 0: diff --git a/parsons/mobilecommons/mobilecommons.py b/parsons/mobilecommons/mobilecommons.py index 41acf31386..3f2c9fbf5e 100644 --- a/parsons/mobilecommons/mobilecommons.py +++ b/parsons/mobilecommons/mobilecommons.py @@ -84,9 +84,7 @@ def _mc_get_request( # Set get request params params = {"limit": page_limit, **self.default_params, **params} - logger.info( - f"Working on fetching first {page_limit} rows. This can take a long time." - ) + logger.info(f"Working on fetching first {page_limit} rows. 
This can take a long time.") # Make get call and parse XML into list of dicts page = 1 @@ -145,18 +143,14 @@ def _mc_get_request( f"of {limit}" ) # Send get request - response_dict = self._parse_get_request( - endpoint=endpoint, params=page_params - ) + response_dict = self._parse_get_request(endpoint=endpoint, params=page_params) # Check to see if page was empty if num parameter is available if page_indicator == "num": empty_page = int(response_dict["response"][first_data_key]["num"]) > 0 if not empty_page: # Extract data - response_table = Table( - response_dict["response"][first_data_key][second_data_key] - ) + response_table = Table(response_dict["response"][first_data_key][second_data_key]) # Append to final table final_table.concat(response_table) final_table.materialize() diff --git a/parsons/mobilize_america/ma.py b/parsons/mobilize_america/ma.py index 43e05d217a..5d27173671 100644 --- a/parsons/mobilize_america/ma.py +++ b/parsons/mobilize_america/ma.py @@ -115,12 +115,7 @@ def get_promoted_organizations(self, organization_id): `Returns` Parsons Table """ - url = ( - self.uri - + "organizations/" - + str(organization_id) - + "/promoted_organizations" - ) + url = self.uri + "organizations/" + str(organization_id) + "/promoted_organizations" return Table(self._request_paginate(url, auth=True)) def get_events( diff --git a/parsons/nation_builder/nation_builder.py b/parsons/nation_builder/nation_builder.py index 3b30d4fa00..a041e16c01 100644 --- a/parsons/nation_builder/nation_builder.py +++ b/parsons/nation_builder/nation_builder.py @@ -25,9 +25,7 @@ class NationBuilder: The Nation Builder access_token Not required if ``NB_ACCESS_TOKEN`` env variable set. """ - def __init__( - self, slug: Optional[str] = None, access_token: Optional[str] = None - ) -> None: + def __init__(self, slug: Optional[str] = None, access_token: Optional[str] = None) -> None: slug = check_env.check("NB_SLUG", slug) token = check_env.check("NB_ACCESS_TOKEN", access_token) @@ -152,9 +150,7 @@ def update_person(self, person_id: str, person: Dict[str, Any]) -> Dict[str, Any return response - def upsert_person( - self, person: Dict[str, Any] - ) -> Tuple[bool, Optional[Dict[str, Any]]]: + def upsert_person(self, person: Dict[str, Any]) -> Tuple[bool, Optional[Dict[str, Any]]]: """ Updates a matched person or creates a new one if the person doesn't exist. diff --git a/parsons/ngpvan/activist_codes.py b/parsons/ngpvan/activist_codes.py index fb6904d861..a426e9ccf2 100644 --- a/parsons/ngpvan/activist_codes.py +++ b/parsons/ngpvan/activist_codes.py @@ -57,15 +57,12 @@ def toggle_activist_code( r = self.apply_response(id, response, id_type, omit_contact=omit_contact) logger.info( - f"{id_type.upper()} {id} {action.capitalize()} " - + f"activist code {activist_code_id}" + f"{id_type.upper()} {id} {action.capitalize()} " + f"activist code {activist_code_id}" ) return r - def apply_activist_code( - self, id, activist_code_id, id_type="vanid", omit_contact=True - ): + def apply_activist_code(self, id, activist_code_id, id_type="vanid", omit_contact=True): """ Apply an activist code to or from a person. 
@@ -108,6 +105,4 @@ def remove_activist_code(self, id, activist_code_id, id_type="vanid"): ``None`` """ - return self.toggle_activist_code( - id, activist_code_id, "Remove", id_type=id_type - ) + return self.toggle_activist_code(id, activist_code_id, "Remove", id_type=id_type) diff --git a/parsons/ngpvan/bulk_import.py b/parsons/ngpvan/bulk_import.py index c0057f0678..c0c7ddb0ad 100644 --- a/parsons/ngpvan/bulk_import.py +++ b/parsons/ngpvan/bulk_import.py @@ -1,4 +1,5 @@ """NGPVAN Bulk Import Endpoints""" + from parsons.etl.table import Table from parsons.utilities import cloud_storage @@ -109,9 +110,7 @@ def get_bulk_import_mapping_type_fields(self, type_name, field_name): A mapping type fields json """ - r = self.connection.get_request( - f"bulkImportMappingTypes/{type_name}/{field_name}/values" - ) + r = self.connection.get_request(f"bulkImportMappingTypes/{type_name}/{field_name}/values") logger.info(f"Found {type_name} bulk import mapping type field values.") return r diff --git a/parsons/ngpvan/changed_entities.py b/parsons/ngpvan/changed_entities.py index e71da4116f..99c269f673 100644 --- a/parsons/ngpvan/changed_entities.py +++ b/parsons/ngpvan/changed_entities.py @@ -37,11 +37,7 @@ def get_changed_entity_resource_fields(self, resource_type): See :ref:`parsons-table` for output options. """ - tbl = Table( - self.connection.get_request( - f"changedEntityExportJobs/fields/{resource_type}" - ) - ) + tbl = Table(self.connection.get_request(f"changedEntityExportJobs/fields/{resource_type}")) logger.info(f"Found {tbl.num_rows} fields for {resource_type}.") return tbl diff --git a/parsons/ngpvan/codes.py b/parsons/ngpvan/codes.py index 40f0c32cdb..689b2695bb 100644 --- a/parsons/ngpvan/codes.py +++ b/parsons/ngpvan/codes.py @@ -1,4 +1,5 @@ """NGPVAN Code Endpoints""" + from parsons.etl.table import Table import logging @@ -10,9 +11,7 @@ def __init__(self, van_connection): self.connection = van_connection - def get_codes( - self, name=None, supported_entities=None, parent_code_id=None, code_type=None - ): + def get_codes(self, name=None, supported_entities=None, parent_code_id=None, code_type=None): """ Get codes. 
diff --git a/parsons/ngpvan/contact_notes.py b/parsons/ngpvan/contact_notes.py index 04a118ff43..11f543b9a5 100644 --- a/parsons/ngpvan/contact_notes.py +++ b/parsons/ngpvan/contact_notes.py @@ -26,9 +26,7 @@ def get_contact_notes(self, van_id): logger.info(f"Found {tbl.num_rows} custom fields.") return tbl - def create_contact_note( - self, van_id, text, is_view_restricted, note_category_id=None - ): + def create_contact_note(self, van_id, text, is_view_restricted, note_category_id=None): """ Create a contact note diff --git a/parsons/ngpvan/events.py b/parsons/ngpvan/events.py index c0c5d17ab3..799bca12b5 100644 --- a/parsons/ngpvan/events.py +++ b/parsons/ngpvan/events.py @@ -108,9 +108,7 @@ def get_event( if expand_fields: expand_fields = ",".join(expand_fields) - r = self.connection.get_request( - f"events/{event_id}", params={"$expand": expand_fields} - ) + r = self.connection.get_request(f"events/{event_id}", params={"$expand": expand_fields}) logger.info(f"Found event {event_id}.") return r @@ -189,9 +187,7 @@ def create_event( """ if shifts is None: - shifts = [ - {"name": "Default Shift", "startTime": start_date, "endTime": end_date} - ] + shifts = [{"name": "Default Shift", "startTime": start_date, "endTime": end_date}] else: shifts = [ { @@ -219,9 +215,7 @@ def create_event( } if location_ids: - event["locations"] = ( - [{"locationId": location_id} for location_id in location_ids], - ) + event["locations"] = ([{"locationId": location_id} for location_id in location_ids],) if code_ids: event["codes"] = [{"codeID": c} for c in code_ids] diff --git a/parsons/ngpvan/people.py b/parsons/ngpvan/people.py index f530cca17b..f8c5727f53 100644 --- a/parsons/ngpvan/people.py +++ b/parsons/ngpvan/people.py @@ -189,9 +189,7 @@ def update_person_json(self, id, id_type="vanid", match_json=None): A person dict """ - return self._people_search( - id=id, id_type=id_type, match_json=match_json, create=True - ) + return self._people_search(id=id, id_type=id_type, match_json=match_json, create=True) def upsert_person( self, @@ -328,9 +326,7 @@ def _people_search( elif isinstance(email, list): json["emails"] = email else: - raise ValueError( - f"Unexpected data type for email argument: {type(email)}" - ) + raise ValueError(f"Unexpected data type for email argument: {type(email)}") if phone: # To Do: Strip out non-integers from phone json["phones"] = [{"phoneNumber": phone, "phoneType": phone_type}] if date_of_birth: @@ -670,9 +666,7 @@ def apply_response( or contact_type_id == 149 # Paid SMS ): if not phone: - raise Exception( - "A phone number must be provided if canvassed via phone or SMS" - ) + raise Exception("A phone number must be provided if canvassed via phone or SMS") if phone: json["canvassContext"]["phone"] = { diff --git a/parsons/ngpvan/saved_lists.py b/parsons/ngpvan/saved_lists.py index 5423e64404..cb7de9c7c9 100644 --- a/parsons/ngpvan/saved_lists.py +++ b/parsons/ngpvan/saved_lists.py @@ -27,9 +27,7 @@ def get_saved_lists(self, folder_id=None): See :ref:`parsons-table` for output options. 
""" - tbl = Table( - self.connection.get_request("savedLists", params={"folderId": folder_id}) - ) + tbl = Table(self.connection.get_request("savedLists", params={"folderId": folder_id})) logger.info(f"Found {tbl.num_rows} saved lists.") return tbl @@ -127,9 +125,7 @@ def upload_saved_list_rest( """ rando = str(uuid.uuid1()) file_name = rando + ".csv" - url = cloud_storage.post_file( - tbl, url_type, file_path=rando + ".zip", **url_kwargs - ) + url = cloud_storage.post_file(tbl, url_type, file_path=rando + ".zip", **url_kwargs) logger.info(f"Table uploaded to {url_type}.") # VAN errors for this method are not particularly useful or helpful. For that reason, we @@ -138,10 +134,7 @@ def upload_saved_list_rest( if folder_id not in [x["folderId"] for x in self.get_folders()]: raise ValueError("Folder does not exist or is not shared with API user.") - if ( - list_name in [x["name"] for x in self.get_saved_lists(folder_id)] - and not overwrite - ): + if list_name in [x["name"] for x in self.get_saved_lists(folder_id)] and not overwrite: raise ValueError( "Saved list already exists. Set overwrite " "argument to list ID or change list name." @@ -179,13 +172,10 @@ def upload_saved_list_rest( if overwrite: json["actions"][0]["overwriteExistingListId"] = overwrite - file_load_job_response = self.connection.post_request( - "fileLoadingJobs", json=json - ) + file_load_job_response = self.connection.post_request("fileLoadingJobs", json=json) job_id = file_load_job_response["jobId"] logger.info( - f"Saved list job {job_id} created. Reference " - "callback url to check for job status" + f"Saved list job {job_id} created. Reference " "callback url to check for job status" ) return file_load_job_response @@ -231,9 +221,7 @@ def upload_saved_list( """ # Move to cloud storage file_name = str(uuid.uuid1()) - url = cloud_storage.post_file( - tbl, url_type, file_path=file_name + ".zip", **url_kwargs - ) + url = cloud_storage.post_file(tbl, url_type, file_path=file_name + ".zip", **url_kwargs) logger.info(f"Table uploaded to {url_type}.") # VAN errors for this method are not particularly useful or helpful. For that reason, we @@ -258,9 +246,7 @@ def upload_saved_list( "uploading a list of vanids." 
) # Create XML - xml = self.connection.soap_client.factory.create( - "CreateAndStoreSavedListMetaData" - ) + xml = self.connection.soap_client.factory.create("CreateAndStoreSavedListMetaData") xml.SavedList._Name = list_name xml.DestinationFolder._ID = folder_id xml.SourceFile.FileName = file_name + ".csv" @@ -269,9 +255,7 @@ def upload_saved_list( xml.Options.OverwriteExistingList = replace # Describe file - file_desc = self.connection.soap_client.factory.create( - "SeparatedFileFormatDescription" - ) + file_desc = self.connection.soap_client.factory.create("SeparatedFileFormatDescription") file_desc._name = "csv" file_desc.HasHeaderRow = True @@ -285,9 +269,7 @@ def upload_saved_list( file_desc.Columns.Column.append(col) xml.SourceFile.Format = file_desc - r = Client.dict( - self.connection.soap_client.service.CreateAndStoreSavedList(xml) - ) + r = Client.dict(self.connection.soap_client.service.CreateAndStoreSavedList(xml)) if r: logger.info(f"Uploaded {r['ListSize']} records to {r['_Name']} saved list.") return r @@ -348,9 +330,7 @@ def get_export_job_types(self): logger.info(f"Found {tbl.num_rows} export job types.") return tbl - def export_job_create( - self, list_id, export_type=4, webhookUrl="https://www.nothing.com" - ): + def export_job_create(self, list_id, export_type=4, webhookUrl="https://www.nothing.com"): """ Creates an export job diff --git a/parsons/ngpvan/scores.py b/parsons/ngpvan/scores.py index 1a652d513b..8f5dd57043 100644 --- a/parsons/ngpvan/scores.py +++ b/parsons/ngpvan/scores.py @@ -182,9 +182,7 @@ def upload_scores( # Move to cloud storage file_name = str(uuid.uuid1()) - url = cloud_storage.post_file( - tbl, url_type, file_path=file_name + ".zip", **url_kwargs - ) + url = cloud_storage.post_file(tbl, url_type, file_path=file_name + ".zip", **url_kwargs) logger.info(f"Table uploaded to {url_type}.") # Generate shell request diff --git a/parsons/ngpvan/signups.py b/parsons/ngpvan/signups.py index b5487551cb..9bc34dc13d 100644 --- a/parsons/ngpvan/signups.py +++ b/parsons/ngpvan/signups.py @@ -1,4 +1,5 @@ """NGPVAN Signups Endpoints""" + from parsons.etl.table import Table import logging @@ -69,9 +70,7 @@ def get_event_signups(self, event_id): See :ref:`parsons-table` for output options. """ - tbl = Table( - self.connection.get_request("signups", params={"eventId": event_id}) - ) + tbl = Table(self.connection.get_request("signups", params={"eventId": event_id})) logger.info(f"Found {tbl.num_rows} signups for event {event_id}.") return self._unpack_signups(tbl) diff --git a/parsons/ngpvan/supporter_groups.py b/parsons/ngpvan/supporter_groups.py index d50ba0c013..1788d523e7 100644 --- a/parsons/ngpvan/supporter_groups.py +++ b/parsons/ngpvan/supporter_groups.py @@ -1,4 +1,5 @@ """NGPVAN Supporter Groups Endpoints""" + from parsons.etl.table import Table import logging @@ -84,9 +85,7 @@ def add_person_supporter_group(self, supporter_group_id, vanid): ``None`` """ - r = self.connection.put_request( - f"supporterGroups/{supporter_group_id}/people/{vanid}" - ) + r = self.connection.put_request(f"supporterGroups/{supporter_group_id}/people/{vanid}") logger.info(f"Added person {vanid} to {supporter_group_id} supporter group.") return r @@ -103,10 +102,6 @@ def delete_person_supporter_group(self, supporter_group_id, vanid): ``None`` """ - r = self.connection.delete_request( - f"supporterGroups/{supporter_group_id}/people/{vanid}" - ) - logger.info( - f"Deleted person {vanid} from {supporter_group_id} supporter group." 
- ) + r = self.connection.delete_request(f"supporterGroups/{supporter_group_id}/people/{vanid}") + logger.info(f"Deleted person {vanid} from {supporter_group_id} supporter group.") return r diff --git a/parsons/ngpvan/survey_questions.py b/parsons/ngpvan/survey_questions.py index a00bc25fe1..0a92ee11e4 100644 --- a/parsons/ngpvan/survey_questions.py +++ b/parsons/ngpvan/survey_questions.py @@ -1,4 +1,5 @@ """NGPVAN Survey Questions Endpoints""" + from parsons.etl.table import Table import logging diff --git a/parsons/ngpvan/targets.py b/parsons/ngpvan/targets.py index ee6490e7bf..104fc6d020 100644 --- a/parsons/ngpvan/targets.py +++ b/parsons/ngpvan/targets.py @@ -63,9 +63,7 @@ def get_target_export(self, export_job_id): url = response["file"]["downloadUrl"] return Table(petl.fromcsv(url, encoding="utf-8-sig")) elif job_status == "Pending" or job_status == "InProcess": - logger.info( - f"Target export job is pending or in process for {export_job_id}." - ) + logger.info(f"Target export job is pending or in process for {export_job_id}.") else: raise TargetsFailed(f"Target export failed for {export_job_id}") diff --git a/parsons/ngpvan/van.py b/parsons/ngpvan/van.py index b019e391f5..c57bf835cb 100644 --- a/parsons/ngpvan/van.py +++ b/parsons/ngpvan/van.py @@ -61,9 +61,7 @@ class VAN( VAN object """ - def __init__( - self, api_key=None, auth_name="default", db=None, raise_for_status=True - ): + def __init__(self, api_key=None, auth_name="default", db=None, raise_for_status=True): self.connection = VANConnector(api_key=api_key, db=db) self.api_key = api_key diff --git a/parsons/notifications/gmail.py b/parsons/notifications/gmail.py index 29ceafdde2..b452c6f30e 100644 --- a/parsons/notifications/gmail.py +++ b/parsons/notifications/gmail.py @@ -72,10 +72,7 @@ def _send_message(self, msg): try: message = ( - self.service.users() - .messages() - .send(userId=self.user_id, body=message) - .execute() + self.service.users().messages().send(userId=self.user_id, body=message).execute() ) except errors.HttpError: self.log.exception("An error occurred: while attempting to send a message.") diff --git a/parsons/notifications/sendmail.py b/parsons/notifications/sendmail.py index bf7873454d..f228f99c23 100644 --- a/parsons/notifications/sendmail.py +++ b/parsons/notifications/sendmail.py @@ -153,9 +153,7 @@ def _create_message_attachments( fp.close() content_type, encoding = mimetypes.guess_type(filename) - self.log.debug( - f"(File: {f}, Content-type: {content_type}, " f"Encoding: {encoding})" - ) + self.log.debug(f"(File: {f}, Content-type: {content_type}, " f"Encoding: {encoding})") if content_type is None or encoding is not None: content_type = "application/octet-stream" @@ -202,9 +200,7 @@ def _validate_email_string(self, str): return True - def send_email( - self, sender, to, subject, message_text, message_html=None, files=None - ): + def send_email(self, sender, to, subject, message_text, message_html=None, files=None): """Send an email message. 
`Args:` @@ -248,9 +244,7 @@ def send_email( elif not files: msg_type = "html" - msg = self._create_message_html( - sender, to, subject, message_text, message_html - ) + msg = self._create_message_html(sender, to, subject, message_text, message_html) else: msg_type = "attachments" if isinstance(files, str): diff --git a/parsons/notifications/slack.py b/parsons/notifications/slack.py index ced3c20fa2..b931fc6ddf 100644 --- a/parsons/notifications/slack.py +++ b/parsons/notifications/slack.py @@ -28,9 +28,7 @@ def __init__(self, api_key=None): self.client = SlackClient(self.api_key) - def channels( - self, fields=["id", "name"], exclude_archived=False, types=["public_channel"] - ): + def channels(self, fields=["id", "name"], exclude_archived=False, types=["public_channel"]): """ Return a list of all channels in a Slack team. @@ -58,12 +56,8 @@ def channels( exclude_archived=exclude_archived, ) - tbl.unpack_dict( - "topic", include_original=False, prepend=True, prepend_value="topic" - ) - tbl.unpack_dict( - "purpose", include_original=False, prepend=True, prepend_value="purpose" - ) + tbl.unpack_dict("topic", include_original=False, prepend=True, prepend_value="topic") + tbl.unpack_dict("purpose", include_original=False, prepend=True, prepend_value="purpose") rm_cols = [x for x in tbl.columns if x not in fields] tbl.remove_column(*rm_cols) @@ -96,9 +90,7 @@ def users( tbl = self._paginate_request("users.list", "members", include_locale=True) - tbl.unpack_dict( - "profile", include_original=False, prepend=True, prepend_value="profile" - ) + tbl.unpack_dict("profile", include_original=False, prepend=True, prepend_value="profile") rm_cols = [x for x in tbl.columns if x not in fields] tbl.remove_column(*rm_cols) diff --git a/parsons/notifications/smtp.py b/parsons/notifications/smtp.py index ae55ee2a64..40071224b9 100644 --- a/parsons/notifications/smtp.py +++ b/parsons/notifications/smtp.py @@ -35,9 +35,7 @@ def __init__( self.port = check("SMTP_PORT", port, optional=True) or 587 self.username = check("SMTP_USER", username) self.password = check("SMTP_PASSWORD", password) - self.tls = not ( - check("SMTP_TLS", tls, optional=True) in ("false", "False", "0", False) - ) + self.tls = not (check("SMTP_TLS", tls, optional=True) in ("false", "False", "0", False)) self.close_manually = close_manually self.conn = None @@ -73,9 +71,7 @@ def _send_message(self, message): raise if result: - self.log.warning( - "Message failed to send to some recipients: " + str(result) - ) + self.log.warning("Message failed to send to some recipients: " + str(result)) if not self.close_manually: conn.quit() self.conn = None diff --git a/parsons/pdi/acquisition_types.py b/parsons/pdi/acquisition_types.py index 83d2a6ffdc..54e51e2123 100644 --- a/parsons/pdi/acquisition_types.py +++ b/parsons/pdi/acquisition_types.py @@ -123,6 +123,4 @@ def update_acquisition_type( "acquisitionMethod": acquisition_method, "pageDefault": page_default, } - return self._request( - f"{self.url_acqtypes}/{id}", req_type="PUT", post_data=payload - ) + return self._request(f"{self.url_acqtypes}/{id}", req_type="PUT", post_data=payload) diff --git a/parsons/pdi/activities.py b/parsons/pdi/activities.py index 9f4bf33150..66a72ebe2c 100644 --- a/parsons/pdi/activities.py +++ b/parsons/pdi/activities.py @@ -49,6 +49,4 @@ def update_activity(self, id: str, activity_name: str, canvassing_shift: str): canvassing_shift bool: The canvassing shift """ payload = {"activityName": activity_name, "canvassingShift": canvassing_shift} - return self._request( - 
f"{self.url_activites}/{id}", req_type="PUT", post_data=payload - ) + return self._request(f"{self.url_activites}/{id}", req_type="PUT", post_data=payload) diff --git a/parsons/pdi/contacts.py b/parsons/pdi/contacts.py index 73dc0c0522..3fb105571d 100644 --- a/parsons/pdi/contacts.py +++ b/parsons/pdi/contacts.py @@ -174,9 +174,7 @@ def update_contact( "dateOfBirth": date_of_birth, "gender": gender, } - res = self._request( - f"{self.url_contacts}/{id}", req_type="PUT", post_data=payload - ) + res = self._request(f"{self.url_contacts}/{id}", req_type="PUT", post_data=payload) if res["code"] == 201: return True diff --git a/parsons/pdi/events.py b/parsons/pdi/events.py index 1f5164b8f2..39947fcad0 100644 --- a/parsons/pdi/events.py +++ b/parsons/pdi/events.py @@ -54,9 +54,7 @@ def get_event_invitations(self, event_id: str, expand=True, limit=None): params = {"expand": expand} - return self._request( - f"{self.events_url}/{event_id}/invitations", args=params, limit=limit - ) + return self._request(f"{self.events_url}/{event_id}/invitations", args=params, limit=limit) def create_event( self, @@ -226,9 +224,7 @@ def create_event_with_activity( response = self._request( self.eventactivities_url, req_type="POST", post_data=event_activity_payload ) - logger.info( - f"Created activity {activity_name} for event {event_name} (id: {event_id})" - ) + logger.info(f"Created activity {activity_name} for event {event_name} (id: {event_id})") return response @@ -341,9 +337,7 @@ def create_invitation( } if specific_occurrence_start: - event_invitation_payload[ - "specificOcurrenceStartUtc" - ] = specific_occurrence_start + event_invitation_payload["specificOcurrenceStartUtc"] = specific_occurrence_start response = self._request( self.events_url + f"/{event_id}/invitations", @@ -394,9 +388,7 @@ def update_invitation( if attended is not None: event_invitation_payload["attended"] = attended if specific_occurrence_start: - event_invitation_payload[ - "specificOcurrenceStartUtc" - ] = specific_occurrence_start + event_invitation_payload["specificOcurrenceStartUtc"] = specific_occurrence_start response = self._request( self.events_url + f"/{event_id}/invitations/{invitation_id}", diff --git a/parsons/pdi/flag_ids.py b/parsons/pdi/flag_ids.py index 4e2bd84a49..41d9c1fe24 100644 --- a/parsons/pdi/flag_ids.py +++ b/parsons/pdi/flag_ids.py @@ -84,9 +84,7 @@ def delete_flag_id(self, id): return True - def update_flag_id( - self, id, flag_id, is_default, flag_description=None, compile=None - ): + def update_flag_id(self, id, flag_id, is_default, flag_description=None, compile=None): """Update a flag id. 
`Args:` @@ -114,8 +112,6 @@ def update_flag_id( "compile": compile, "isDefault": is_default, } - data = self._request( - f"{self.url_flag_ids}/{id}", req_type="PUT", post_data=payload - ) + data = self._request(f"{self.url_flag_ids}/{id}", req_type="PUT", post_data=payload) return data["id"] diff --git a/parsons/pdi/locations.py b/parsons/pdi/locations.py index 8c7ceae47d..20893b9f1d 100644 --- a/parsons/pdi/locations.py +++ b/parsons/pdi/locations.py @@ -40,8 +40,6 @@ def get_location(self, id: str): def update_location(self, id: str, location_name: str, address: str): payload = {"locationName": location_name, "locationAddress": address} - res = self._request( - f"{self.locations_url}/{id}", req_type="PUT", post_data=payload - ) + res = self._request(f"{self.locations_url}/{id}", req_type="PUT", post_data=payload) if res["code"] == 201: return True diff --git a/parsons/pdi/pdi.py b/parsons/pdi/pdi.py index 5ca952a181..b1f197d220 100644 --- a/parsons/pdi/pdi.py +++ b/parsons/pdi/pdi.py @@ -148,9 +148,7 @@ def _request(self, url, req_type="GET", post_data=None, args=None, limit=None): args = args or {} args["cursor"] = cursor args["limit"] = LIMIT_MAX - res = request_fn[req_type]( - url, headers=headers, json=post_data, params=args - ) + res = request_fn[req_type](url, headers=headers, json=post_data, params=args) data.extend(res.json()["data"]) @@ -166,9 +164,7 @@ def _request(self, url, req_type="GET", post_data=None, args=None, limit=None): args = args or {} args["cursor"] = cursor args["limit"] = min(LIMIT_MAX, total_need - len(data)) - res = request_fn[req_type]( - url, headers=headers, json=post_data, params=args - ) + res = request_fn[req_type](url, headers=headers, json=post_data, params=args) data.extend(res.json()["data"]) diff --git a/parsons/phone2action/p2a.py b/parsons/phone2action/p2a.py index 1896066d70..6c02250acb 100644 --- a/parsons/phone2action/p2a.py +++ b/parsons/phone2action/p2a.py @@ -21,21 +21,15 @@ class Phone2Action(object): def __init__(self, app_id=None, app_key=None): self.capitol_canary = CapitolCanary(app_id, app_key) - logger.warning( - "The Phone2Action class is being deprecated and replaced by CapitalCanary" - ) + logger.warning("The Phone2Action class is being deprecated and replaced by CapitalCanary") def __getattr__(self, name): try: return getattr(self.capitol_canary, name) except AttributeError: - raise AttributeError( - f"{type(self).__name__} object has no attribute {name}" - ) + raise AttributeError(f"{type(self).__name__} object has no attribute {name}") - def get_advocates( - self, state=None, campaign_id=None, updated_since=None, page=None - ): + def get_advocates(self, state=None, campaign_id=None, updated_since=None, page=None): """ Return advocates (person records). 
@@ -64,9 +58,7 @@ def get_advocates( * fields * advocates """ - return self.capitol_canary.get_advocates( - state, campaign_id, updated_since, page - ) + return self.capitol_canary.get_advocates(state, campaign_id, updated_since, page) def get_campaigns( self, diff --git a/parsons/quickbase/quickbase.py b/parsons/quickbase/quickbase.py index b710b43f86..c2f8c50d7b 100644 --- a/parsons/quickbase/quickbase.py +++ b/parsons/quickbase/quickbase.py @@ -47,9 +47,7 @@ def get_app_tables(self, app_id=None): Table Class """ return Table( - self.client.request( - f"{self.api_hostname}/tables?appId={app_id}", "GET" - ).json() + self.client.request(f"{self.api_hostname}/tables?appId={app_id}", "GET").json() ) def query_records(self, table_from=None): diff --git a/parsons/redash/redash.py b/parsons/redash/redash.py index d68642a781..f00d065334 100644 --- a/parsons/redash/redash.py +++ b/parsons/redash/redash.py @@ -54,9 +54,7 @@ def __init__( def _catch_runtime_error(self, res): if res.status_code != 200: - raise RuntimeError( - f"Error. Status code: {res.status_code}. Reason: {res.reason}" - ) + raise RuntimeError(f"Error. Status code: {res.status_code}. Reason: {res.reason}") def _poll_job(self, session, job, query_id): start_secs = time.time() @@ -68,11 +66,7 @@ def _poll_job(self, session, job, query_id): response_json = response.json() job = response_json.get( "job", - { - "status": "Error NO JOB IN RESPONSE: {}".format( - json.dumps(response_json) - ) - }, + {"status": "Error NO JOB IN RESPONSE: {}".format(json.dumps(response_json))}, ) logger.debug( "poll url:%s id:%s status:%s err:%s", @@ -86,9 +80,7 @@ def _poll_job(self, session, job, query_id): if job["status"] == 3: # 3 = completed return job["query_result_id"] elif job["status"] == 4: # 3 = ERROR - raise RedashQueryFailed( - "Redash Query {} failed: {}".format(query_id, job["error"]) - ) + raise RedashQueryFailed("Redash Query {} failed: {}".format(query_id, job["error"])) def get_data_source(self, data_source_id): """ @@ -104,9 +96,7 @@ def get_data_source(self, data_source_id): self._catch_runtime_error(res) return res.json() - def update_data_source( - self, data_source_id, name, type, dbName, host, password, port, user - ): + def update_data_source(self, data_source_id, name, type, dbName, host, password, port, user): """ Update a data source. @@ -171,9 +161,7 @@ def get_fresh_query_results(self, query_id=None, params=None): query_id = check("REDASH_QUERY_ID", query_id, optional=True) params_from_env = check("REDASH_QUERY_PARAMS", "", optional=True) redash_params = ( - {"p_%s" % k: str(v).replace("'", "''") for k, v in params.items()} - if params - else {} + {"p_%s" % k: str(v).replace("'", "''") for k, v in params.items()} if params else {} ) response = self.session.post( @@ -183,9 +171,7 @@ def get_fresh_query_results(self, query_id=None, params=None): ) if response.status_code != 200: - raise RedashQueryFailed( - f"Refresh failed for query {query_id}. {response.text}" - ) + raise RedashQueryFailed(f"Refresh failed for query {query_id}. {response.text}") job = response.json()["job"] result_id = self._poll_job(self.session, job, query_id) @@ -199,9 +185,7 @@ def get_fresh_query_results(self, query_id=None, params=None): f"Failed getting results for query {query_id}. {response.text}" ) else: - raise RedashQueryFailed( - f"Failed getting result {query_id}. {response.text}" - ) + raise RedashQueryFailed(f"Failed getting result {query_id}. 
{response.text}") return Table.from_csv_string(response.text) def get_cached_query_results(self, query_id=None, query_api_key=None): @@ -229,9 +213,7 @@ def get_cached_query_results(self, query_id=None, query_api_key=None): verify=self.verify, ) if response.status_code != 200: - raise RedashQueryFailed( - f"Failed getting results for query {query_id}. {response.text}" - ) + raise RedashQueryFailed(f"Failed getting results for query {query_id}. {response.text}") return Table.from_csv_string(response.text) @classmethod @@ -272,10 +254,6 @@ def load_to_table(cls, refresh=True, **kwargs): } obj = cls(**initargs) if not refresh or kwargs.get("query_api_key"): - return obj.get_cached_query_results( - kwargs.get("query_id"), kwargs.get("query_api_key") - ) + return obj.get_cached_query_results(kwargs.get("query_id"), kwargs.get("query_api_key")) else: - return obj.get_fresh_query_results( - kwargs.get("query_id"), kwargs.get("params") - ) + return obj.get_fresh_query_results(kwargs.get("query_id"), kwargs.get("params")) diff --git a/parsons/rockthevote/rtv.py b/parsons/rockthevote/rtv.py index 3662da22ca..baa14da966 100644 --- a/parsons/rockthevote/rtv.py +++ b/parsons/rockthevote/rtv.py @@ -95,9 +95,7 @@ def create_registration_report(self, before=None, since=None, report_type=None): if report_type: if report_type not in VALID_REPORT_TYPES: - raise RTVFailure( - f"Invalid report type. Must be one of {VALID_REPORT_TYPES}" - ) + raise RTVFailure(f"Invalid report type. Must be one of {VALID_REPORT_TYPES}") report_parameters["report_type"] = report_type if since: since_date = parse_date(since).strftime(DATETIME_FORMAT) @@ -167,9 +165,7 @@ def get_registration_report( # Let's figure out at what time should we just give up because we waited # too long - end_time = datetime.datetime.now() + datetime.timedelta( - seconds=report_timeout_seconds - ) + end_time = datetime.datetime.now() + datetime.timedelta(seconds=report_timeout_seconds) # If we have a download URL, we can move on and just download the # report. Otherwise, as long as we haven't run out of time, we will @@ -216,9 +212,7 @@ def get_registration_report( # Transform the data from the report's CSV format to something more # Pythonic (snake case) - normalized_column_names = [ - re.sub(r"\s", "_", name).lower() for name in table.columns - ] + normalized_column_names = [re.sub(r"\s", "_", name).lower() for name in table.columns] normalized_column_names = [ re.sub(r"[^A-Za-z\d_]", "", name) for name in normalized_column_names ] @@ -261,8 +255,7 @@ def run_registration_report( """ report_str = f"{report_type} report" if report_type else "report" logger.info( - f"Running {report_str} for {self.partner_id} " - f"for dates: {since} to {before}..." + f"Running {report_str} for {self.partner_id} " f"for dates: {since} to {before}..." 
) report_id = self.create_registration_report( before=before, since=since, report_type=report_type @@ -310,9 +303,7 @@ def get_state_requirements( if callback: params["callback"] = callback - requirements_response = self.client.request( - requirements_url, "get", params=params - ) + requirements_response = self.client.request(requirements_url, "get", params=params) if requirements_response.status_code == requests.codes.ok: response_json = requirements_response.json() diff --git a/parsons/salesforce/salesforce.py b/parsons/salesforce/salesforce.py index de57b3fcc4..e71a98f78c 100644 --- a/parsons/salesforce/salesforce.py +++ b/parsons/salesforce/salesforce.py @@ -28,15 +28,11 @@ class Salesforce: Salesforce class """ - def __init__( - self, username=None, password=None, security_token=None, test_environment=False - ): + def __init__(self, username=None, password=None, security_token=None, test_environment=False): self.username = check_env.check("SALESFORCE_USERNAME", username) self.password = check_env.check("SALESFORCE_PASSWORD", password) - self.security_token = check_env.check( - "SALESFORCE_SECURITY_TOKEN", security_token - ) + self.security_token = check_env.check("SALESFORCE_SECURITY_TOKEN", security_token) if test_environment: self.domain = check_env.check("SALESFORCE_DOMAIN", "test") diff --git a/parsons/scytl/scytl.py b/parsons/scytl/scytl.py index 8873e0eb6d..05033d184d 100644 --- a/parsons/scytl/scytl.py +++ b/parsons/scytl/scytl.py @@ -11,9 +11,7 @@ CLARITY_URL = "https://results.enr.clarityelections.com/" -CURRENT_VERSION_URL_TEMPLATE = ( - CLARITY_URL + "{administrator}/{election_id}/current_ver.txt" -) +CURRENT_VERSION_URL_TEMPLATE = CLARITY_URL + "{administrator}/{election_id}/current_ver.txt" SUMMARY_CSV_ZIP_URL_TEMPLATE = ( CLARITY_URL + "{administrator}/{election_id}/{version_num}/reports/summary.zip" ) @@ -85,9 +83,7 @@ def __init__(self, state: str, election_id: str, county=""): self.state = state self.county = county.replace(" ", "_") - self.administrator = ( - f"{self.state}/{self.county}" if self.county else self.state - ) + self.administrator = f"{self.state}/{self.county}" if self.county else self.state self.election_id = election_id self.previous_summary_version_num = None @@ -187,9 +183,7 @@ def _get_latest_counties_scytl_info( state=state, election_id=election_id, version_num=version_num ) - settings_json_res = requests.get( - config_settings_json_url, headers=BROWSER_HEADERS - ) + settings_json_res = requests.get(config_settings_json_url, headers=BROWSER_HEADERS) settings_json = settings_json_res.json() participating_counties = settings_json["settings"]["electiondetails"][ @@ -294,9 +288,7 @@ def _parse_county_xml_data_to_precincts( "precinct_name": precinct_name, "recorded_votes": cand_votes[precinct_name], "voter_turnout": precinct_turnout.get("voter_turnout"), - "percent_reporting": precinct_turnout.get( - "percent_reporting" - ), + "percent_reporting": precinct_turnout.get("percent_reporting"), "timestamp_last_updated": county_details.county_update_date, } @@ -304,9 +296,7 @@ def _parse_county_xml_data_to_precincts( return precinct_votes - def _parse_state_xml_data_to_counties( - self, state_data: bytes, state: str - ) -> t.List[t.Dict]: + def _parse_state_xml_data_to_counties(self, state_data: bytes, state: str) -> t.List[t.Dict]: """ Parse a detail XML file for a state into a list of election results by county and vote method. 
@@ -330,9 +320,7 @@ def _parse_state_xml_data_to_counties( for child in root: - if ( - child.tag == "Timestamp" - ): # 1/5/2021 3:22:30 PM EST + if child.tag == "Timestamp": # 1/5/2021 3:22:30 PM EST timestamp = self._parse_date_to_utc(child.text) if child.tag == "ElectionVoterTurnout": @@ -373,12 +361,8 @@ def _parse_state_xml_data_to_counties( "office": office, "ballots_cast": county_turnout.get("ballotsCast"), "reg_voters": county_turnout.get("totalVoters"), - "precincts_reporting": county_turnout.get( - "precinctsReported" - ), - "total_precincts": county_turnout.get( - "precinctsParticipating" - ), + "precincts_reporting": county_turnout.get("precinctsReported"), + "total_precincts": county_turnout.get("precinctsParticipating"), "vote_method": vote_type_label, "candidate_name": cand_name, "candidate_party": cand_party, @@ -559,17 +543,11 @@ def get_detailed_results(self, force_update=False) -> t.List[t.Dict]: county_data = self._parse_file_from_zip_url(detail_xml_url, "detail.xml") if self.county: - county_details = CountyDetails( - self.state, self.county, self.election_id, version_num - ) + county_details = CountyDetails(self.state, self.county, self.election_id, version_num) - parsed_data = self._parse_county_xml_data_to_precincts( - county_data, county_details - ) + parsed_data = self._parse_county_xml_data_to_precincts(county_data, county_details) else: - parsed_data = self._parse_state_xml_data_to_counties( - county_data, self.state - ) + parsed_data = self._parse_state_xml_data_to_counties(county_data, self.state) self.previous_details_version_num = version_num @@ -662,9 +640,7 @@ def get_detailed_results_for_participating_counties( ) try: - county_data = self._parse_file_from_zip_url( - detail_xml_url, "detail.xml" - ) + county_data = self._parse_file_from_zip_url(detail_xml_url, "detail.xml") except requests.exceptions.RequestException: try: @@ -683,9 +659,7 @@ def get_detailed_results_for_participating_counties( parsed_data += summary_data else: - parsed_data += self._parse_county_xml_data_to_precincts( - county_data, county_details - ) + parsed_data += self._parse_county_xml_data_to_precincts(county_data, county_details) fetched_counties.append(county_name) diff --git a/parsons/sftp/sftp.py b/parsons/sftp/sftp.py index e1b441477a..9f6ad47644 100644 --- a/parsons/sftp/sftp.py +++ b/parsons/sftp/sftp.py @@ -198,8 +198,7 @@ def get_files( files_to_download.extend( f for file_list in [ - self.list_files(directory, connection, pattern) - for directory in remote + self.list_files(directory, connection, pattern) for directory in remote ] for f in file_list ) @@ -340,9 +339,7 @@ def _list_contents(remote_path, connection, dir_pattern=None, file_pattern=None) entry_pathname = remote_path + "/" + entry.filename for method, pattern, do_search_full_path, paths in dirs_and_files: string = entry_pathname if do_search_full_path else entry.filename - if method(entry.st_mode) and ( - not pattern or re.search(pattern, string) - ): + if method(entry.st_mode) and (not pattern or re.search(pattern, string)): paths.append(entry_pathname) except FileNotFoundError: # This error is raised when a directory is empty pass @@ -457,9 +454,7 @@ def _walk_tree( depth += 1 - dirs, files = self._list_contents( - remote_path, connection, dir_pattern, file_pattern - ) + dirs, files = self._list_contents(remote_path, connection, dir_pattern, file_pattern) if download: self.get_files(files_to_download=files) diff --git a/parsons/shopify/shopify.py b/parsons/shopify/shopify.py index 467753ad52..d6a18e15ab 100644 
--- a/parsons/shopify/shopify.py +++ b/parsons/shopify/shopify.py @@ -39,9 +39,7 @@ def __init__( access_token=None, ): self.subdomain = check_env.check("SHOPIFY_SUBDOMAIN", subdomain) - self.access_token = check_env.check( - "SHOPIFY_ACCESS_TOKEN", access_token, optional=True - ) + self.access_token = check_env.check("SHOPIFY_ACCESS_TOKEN", access_token, optional=True) self.password = check_env.check("SHOPIFY_PASSWORD", password, optional=True) self.api_key = check_env.check("SHOPIFY_API_KEY", api_key, optional=True) self.api_version = check_env.check("SHOPIFY_API_VERSION", api_version) @@ -49,18 +47,14 @@ def __init__( self.subdomain, self.api_version, ) - if self.access_token is None and ( - self.password is None or self.api_key is None - ): + if self.access_token is None and (self.password is None or self.api_key is None): raise KeyError("Must set either access_token or both api_key and password.") if self.access_token is not None: self.client = APIConnector( self.base_url, headers={"X-Shopify-Access-Token": access_token} ) else: - self.client = APIConnector( - self.base_url, auth=(self.api_key, self.password) - ) + self.client = APIConnector(self.base_url, auth=(self.api_key, self.password)) def get_count(self, query_date=None, since_id=None, table_name=None): """ @@ -77,9 +71,7 @@ def get_count(self, query_date=None, since_id=None, table_name=None): int """ return ( - self.client.request( - self.get_query_url(query_date, since_id, table_name), "GET" - ) + self.client.request(self.get_query_url(query_date, since_id, table_name), "GET") .json() .get("count", 0) ) @@ -144,9 +136,7 @@ def _append_orders(url): return Table(orders) - def get_query_url( - self, query_date=None, since_id=None, table_name=None, count=True - ): + def get_query_url(self, query_date=None, since_id=None, table_name=None, count=True): """ Get the URL of a Shopify API request `Args:` @@ -192,9 +182,7 @@ def graphql(self, query): dict """ return ( - self.client.request( - self.base_url + "graphql.json", "POST", json={"query": query} - ) + self.client.request(self.base_url + "graphql.json", "POST", json={"query": query}) .json() .get("data") ) diff --git a/parsons/sisense/sisense.py b/parsons/sisense/sisense.py index 3da1eb97e5..6032739623 100644 --- a/parsons/sisense/sisense.py +++ b/parsons/sisense/sisense.py @@ -50,9 +50,7 @@ def publish_shared_dashboard(self, dashboard_id, chart_id=None, **kwargs): Response (dict containing the URL) or an error """ payload = {"dashboard": dashboard_id, "chart": chart_id, **kwargs} - return self.api.post_request( - "shared_dashboard/create", data=json.dumps(payload) - ) + return self.api.post_request("shared_dashboard/create", data=json.dumps(payload)) def list_shared_dashboards(self, dashboard_id): """ @@ -83,6 +81,4 @@ def delete_shared_dashboard(self, token): Response or an error """ payload = {"token": token} - return self.api.post_request( - "shared_dashboard/delete", data=json.dumps(payload) - ) + return self.api.post_request("shared_dashboard/delete", data=json.dumps(payload)) diff --git a/parsons/targetsmart/targetsmart_api.py b/parsons/targetsmart/targetsmart_api.py index bbe276df7e..9d4db09798 100644 --- a/parsons/targetsmart/targetsmart_api.py +++ b/parsons/targetsmart/targetsmart_api.py @@ -3,6 +3,7 @@ https://docs.targetsmart.com/developers/tsapis/v2/index.html """ + import logging import petl @@ -60,9 +61,7 @@ def data_enhance(self, search_id, search_id_type="voterbase", state=None): if search_id_type in ["smartvan", "votebuilder", "voter"] and state is None: - raise 
KeyError( - "Search ID type '{}' requires state kwarg".format(search_id_type) - ) + raise KeyError("Search ID type '{}' requires state kwarg".format(search_id_type)) if search_id_type not in ( "voterbase", @@ -205,9 +204,7 @@ def radius_search( } r = self.connection.request(url, args=args, raw=True) - return Table([itm for itm in r["output"]]).unpack_dict( - "data_fields", prepend=False - ) + return Table([itm for itm in r["output"]]).unpack_dict("data_fields", prepend=False) def phone(self, table): """ @@ -290,9 +287,7 @@ def district( raise ValueError("Search type 'zip' requires 'zip5' and 'zip4' arguments") elif search_type == "point" and None in [latitude, longitude]: - raise ValueError( - "Search type 'point' requires 'latitude' and 'longitude' arguments" - ) + raise ValueError("Search type 'point' requires 'latitude' and 'longitude' arguments") elif search_type == "address" and None in [address]: raise ValueError("Search type 'address' requires 'address' argument") diff --git a/parsons/targetsmart/targetsmart_automation.py b/parsons/targetsmart/targetsmart_automation.py index eff5ba0847..f776297201 100644 --- a/parsons/targetsmart/targetsmart_automation.py +++ b/parsons/targetsmart/targetsmart_automation.py @@ -139,9 +139,7 @@ def match( self.match_status(job_name) # Download the resulting file - tbl = Table.from_csv( - self.sftp.get_file(f"{self.sftp_dir}/{job_name}_output.csv") - ) + tbl = Table.from_csv(self.sftp.get_file(f"{self.sftp_dir}/{job_name}_output.csv")) finally: # Clean up files @@ -159,9 +157,7 @@ def execute(self, *args, **kwargs): """ self.match(*args, **kwargs) - def create_job_xml( - self, job_type, job_name, emails=None, status_key=None, call_back=None - ): + def create_job_xml(self, job_type, job_name, emails=None, status_key=None, call_back=None): # Internal method to create a valid job xml job = ET.Element("job") @@ -240,18 +236,14 @@ def match_status(self, job_name, polling_interval=60): if file_name == f"{job_name}.finish.xml": - xml_file = self.sftp.get_file( - f"{self.sftp_dir}/{job_name}.finish.xml" - ) + xml_file = self.sftp.get_file(f"{self.sftp_dir}/{job_name}.finish.xml") with open(xml_file, "rb") as x: xml = xmltodict.parse(x, dict_constructor=dict) if xml["jobcontext"]["state"] == "error": # To Do: Parse these in a pretty way logger.info(f"Match Error: {xml['jobcontext']['errors']}") - raise ValueError( - f"Match job failed. {xml['jobcontext']['errors']}" - ) + raise ValueError(f"Match job failed. {xml['jobcontext']['errors']}") elif xml["jobcontext"]["state"] == "success": logger.info("Match complete.") diff --git a/parsons/targetsmart/targetsmart_smartmatch.py b/parsons/targetsmart/targetsmart_smartmatch.py index 55798410c2..d221ee613f 100644 --- a/parsons/targetsmart/targetsmart_smartmatch.py +++ b/parsons/targetsmart/targetsmart_smartmatch.py @@ -196,8 +196,7 @@ def smartmatch( if not input_table: raise ValueError( - "Missing `input_table`. A Petl table must be provided with" - " valid input rows." + "Missing `input_table`. A Petl table must be provided with" " valid input rows." ) if not hasattr(input_table, "tocsv"): @@ -235,8 +234,7 @@ def smartmatch( response_1_info = response_1.json() if response_1_info["error"]: raise SmartMatchError( - "SmartMatch workflow registration failed. Error:" - f" {response_1_info['error']}" + "SmartMatch workflow registration failed. 
Error:" f" {response_1_info['error']}" ) logger.info( @@ -282,8 +280,7 @@ def smartmatch( delete=False, ) as tmp_csv: logger.info( - f"Downloading the '{submit_filename}' SmartMatch results to" - f" {tmp_gz.name}." + f"Downloading the '{submit_filename}' SmartMatch results to" f" {tmp_gz.name}." ) _smartmatch_download(download_url, tmp_gz) tmp_gz.flush() @@ -297,8 +294,7 @@ def smartmatch( tmp_csv.name, encoding="utf8" ).convert(INTERNAL_JOIN_ID, int) logger.info( - "SmartMatch remote execution successful. Joining results to" - " input table." + "SmartMatch remote execution successful. Joining results to" " input table." ) outtable = ( petl.leftjoin( # pylint: disable=no-member @@ -311,8 +307,6 @@ def smartmatch( .cutout(INTERNAL_JOIN_ID) ) if INTERNAL_JOIN_ID_CONFLICT in input_table.fieldnames(): - input_table = input_table.rename( - INTERNAL_JOIN_ID_CONFLICT, INTERNAL_JOIN_ID - ) + input_table = input_table.rename(INTERNAL_JOIN_ID_CONFLICT, INTERNAL_JOIN_ID) return Table(outtable) diff --git a/parsons/tools/credential_tools.py b/parsons/tools/credential_tools.py index 40d0374c8b..056723cebe 100644 --- a/parsons/tools/credential_tools.py +++ b/parsons/tools/credential_tools.py @@ -27,9 +27,7 @@ def decode_credential(credential, save_path=None, export=True, echo=False): if credential[:x] != PREFIX: raise ValueError("Invalid Parsons variable.") - decoded_str = b64decode(bytes(credential.replace(PREFIX, ""), "utf-8")).decode( - "utf-8" - ) + decoded_str = b64decode(bytes(credential.replace(PREFIX, ""), "utf-8")).decode("utf-8") decoded_dict = json.loads(decoded_str) @@ -130,9 +128,7 @@ def encode_from_dict(credential): default=True, help="Endcode a credential.", ) -@click.option( - "--decode", "-d", "fn", flag_value="decode", help="Decode an encoded credential." -) +@click.option("--decode", "-d", "fn", flag_value="decode", help="Decode an encoded credential.") @click.option( "-f", "is_file", @@ -151,13 +147,9 @@ def encode_from_dict(credential): "no_export", is_flag=True, default=False, - help=( - "Do not export the variable to the environment. Only " "valid with --decode." - ), -) -@click.option( - "-s", "suppress", is_flag=True, default=False, help=("Suppress " "the output.") + help=("Do not export the variable to the environment. Only " "valid with --decode."), ) +@click.option("-s", "suppress", is_flag=True, default=False, help=("Suppress " "the output.")) def main(credential, fn, is_file=False, save_path="", no_export=False, suppress=False): """A command line tool to encode and decode credentials. diff --git a/parsons/utilities/api_connector.py b/parsons/utilities/api_connector.py index 9db696c022..dfced6487b 100644 --- a/parsons/utilities/api_connector.py +++ b/parsons/utilities/api_connector.py @@ -32,9 +32,7 @@ class APIConnector(object): APIConnector class """ - def __init__( - self, uri, headers=None, auth=None, pagination_key=None, data_key=None - ): + def __init__(self, uri, headers=None, auth=None, pagination_key=None, data_key=None): # Add a trailing slash if its missing if not uri.endswith("/"): uri = uri + "/" @@ -163,9 +161,7 @@ def delete_request(self, url, params=None, success_codes=[200, 201, 204]): else: return r.status_code - def put_request( - self, url, data=None, json=None, params=None, success_codes=[200, 201, 204] - ): + def put_request(self, url, data=None, json=None, params=None, success_codes=[200, 201, 204]): """ Make a PUT request. 
@@ -192,9 +188,7 @@ def put_request( else: return r.status_code - def patch_request( - self, url, params=None, data=None, json=None, success_codes=[200, 201, 204] - ): + def patch_request(self, url, params=None, data=None, json=None, success_codes=[200, 201, 204]): """ Make a PATCH request. diff --git a/parsons/utilities/check_env.py b/parsons/utilities/check_env.py index b175ed8dd1..29a8e74802 100644 --- a/parsons/utilities/check_env.py +++ b/parsons/utilities/check_env.py @@ -13,7 +13,6 @@ def check(env, field, optional=False): except KeyError: if not optional: raise KeyError( - f"No {env} found. Store as environment variable or " - f"pass as an argument." + f"No {env} found. Store as environment variable or " f"pass as an argument." ) return field diff --git a/parsons/utilities/cloud_storage.py b/parsons/utilities/cloud_storage.py index 83e829185c..7b27ce89c1 100644 --- a/parsons/utilities/cloud_storage.py +++ b/parsons/utilities/cloud_storage.py @@ -8,9 +8,7 @@ """ -def post_file( - tbl, type, file_path=None, quoting=csv.QUOTE_MINIMAL, **file_storage_args -): +def post_file(tbl, type, file_path=None, quoting=csv.QUOTE_MINIMAL, **file_storage_args): """ This utility method is a generalizable method for moving files to an online file storage class. It is used by methods that require access @@ -40,9 +38,7 @@ def post_file( if "key" in file_storage_args: file_storage_args["key"] = file_path - return tbl.to_s3_csv( - public_url=True, key=file_path, quoting=quoting, **file_storage_args - ) + return tbl.to_s3_csv(public_url=True, key=file_path, quoting=quoting, **file_storage_args) elif type.upper() == "GCS": diff --git a/parsons/utilities/dbt.py b/parsons/utilities/dbt.py index f5888f93ae..9861564344 100644 --- a/parsons/utilities/dbt.py +++ b/parsons/utilities/dbt.py @@ -25,7 +25,6 @@ ``` """ - import datetime import json import logging @@ -113,14 +112,10 @@ def record_result( log_message += f"\n*Summary*: `{done_message}`" if error_messages: - log_message += "\nError messages:\n```{}```".format( - "\n\n".join(error_messages) - ) + log_message += "\nError messages:\n```{}```".format("\n\n".join(error_messages)) if warn_messages: - log_message += "\nWarning messages:\n```{}```".format( - "\n\n".join(warn_messages) - ) + log_message += "\nWarning messages:\n```{}```".format("\n\n".join(warn_messages)) if skip_messages: skips = [ @@ -188,9 +183,7 @@ def log_results(self, command_str: str, stdout: str, stderr: str) -> None: logger.error(log_message) error_messages.append(log_message) # Capture model/test warnings but exclude verbose top-level warnings - elif ( - row["info"]["level"] == "warn" and "[WARNING]" not in row["info"]["msg"] - ): + elif row["info"]["level"] == "warn" and "[WARNING]" not in row["info"]["msg"]: logger.warning(log_message) warn_messages.append(log_message) elif "SKIP " in row["info"]["msg"]: @@ -205,9 +198,7 @@ def log_results(self, command_str: str, stdout: str, stderr: str) -> None: else: done_message = "" - self.record_result( - command_str, error_messages, warn_messages, skip_messages, done_message - ) + self.record_result(command_str, error_messages, warn_messages, skip_messages, done_message) class dbtRunner: diff --git a/parsons/utilities/oauth_api_connector.py b/parsons/utilities/oauth_api_connector.py index fa25e018be..d0b006f5b9 100644 --- a/parsons/utilities/oauth_api_connector.py +++ b/parsons/utilities/oauth_api_connector.py @@ -65,7 +65,7 @@ def __init__( token_url=token_url, client_id=client_id, client_secret=client_secret, - **authorization_kwargs + 
**authorization_kwargs, ) self.client = OAuth2Session( client_id, diff --git a/parsons/zoom/zoom.py b/parsons/zoom/zoom.py index b71b354566..7c71bbe32d 100644 --- a/parsons/zoom/zoom.py +++ b/parsons/zoom/zoom.py @@ -126,9 +126,7 @@ def __process_poll_results(self, tbl: Table) -> Table: tbl.remove_column("question_details") # Unpack question values - tbl = tbl.unpack_dict( - "question_details_value", include_original=True, prepend=False - ) + tbl = tbl.unpack_dict("question_details_value", include_original=True, prepend=False) # Remove column from API response tbl.remove_column("question_details_value") @@ -221,9 +219,7 @@ def get_past_meeting_participants(self, meeting_id): See :ref:`parsons-table` for output options. """ - tbl = self._get_request( - f"report/meetings/{meeting_id}/participants", "participants" - ) + tbl = self._get_request(f"report/meetings/{meeting_id}/participants", "participants") logger.info(f"Retrieved {tbl.num_rows} participants.") return tbl @@ -271,9 +267,7 @@ def get_past_webinar_participants(self, webinar_id): See :ref:`parsons-table` for output options. """ - tbl = self._get_request( - f"report/webinars/{webinar_id}/participants", "participants" - ) + tbl = self._get_request(f"report/webinars/{webinar_id}/participants", "participants") logger.info(f"Retrieved {tbl.num_rows} webinar participants.") return tbl diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000000..66c837c930 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,18 @@ +[tool.black] +line-length = 100 +target-version = ['py38'] +include = '\.pyi?$' + +[tool.flake8] +max-line-length = 100 +ignore = ['E203', 'W503'] + +[tool.pytest.ini_options] +addopts = "-rf" +testpaths = [ + "test", +] +filterwarnings = [ + # Warnings triggered by libraries we use (not our own code) + "ignore:invalid escape sequence:DeprecationWarning" +] \ No newline at end of file diff --git a/pytest.ini b/pytest.ini deleted file mode 100644 index b04ac7ff4b..0000000000 --- a/pytest.ini +++ /dev/null @@ -1,5 +0,0 @@ -[pytest] -filterwarnings = - # Warnings triggered by libraries we use (not our own code) - ignore:Using or importing the ABCs from 'collections':DeprecationWarning - ignore:invalid escape sequence:DeprecationWarning diff --git a/requirements-dev.txt b/requirements-dev.txt index 754dd5e074..0bee9cdfa2 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,8 +1,9 @@ # Testing Requirements -requests-mock==1.5.2 -flake8==4.0.1 -black==22.12.0 -testfixtures==6.18.5 -pytest==7.1.1 -pytest-datadir==1.3.0 -pytest-mock>=3.0.0 +black==24.2.0 +Flake8-pyproject==1.2.3 +flake8==7.0.0 +pytest-datadir==1.5.0 +pytest-mock==3.12.0 +pytest==8.1.1 +requests-mock==1.11.0 +testfixtures==8.1.0 diff --git a/test/test_actblue/test_actblue.py b/test/test_actblue/test_actblue.py index 2132ed9878..4d73e07b5c 100644 --- a/test/test_actblue/test_actblue.py +++ b/test/test_actblue/test_actblue.py @@ -31,9 +31,7 @@ class TestActBlue(unittest.TestCase): def setUp(self, m): self.ab = ActBlue(TEST_CLIENT_UUID, TEST_CLIENT_SECRET, TEST_URI) self.from_csv = Table.from_csv - test_csv_data = Table.from_csv_string( - open("test/test_actblue/test_csv_data.csv").read() - ) + test_csv_data = Table.from_csv_string(open("test/test_actblue/test_csv_data.csv").read()) Table.from_csv = MagicMock(name="mocked from_csv", return_value=test_csv_data) def tearDown(self): @@ -43,9 +41,7 @@ def tearDown(self): def test_successful_post_request(self, m): m.post(f"{TEST_URI}/csvs", json=TEST_POST_RESPONSE) - response = 
self.ab.post_request( - TEST_CSV_TYPE, TEST_DATE_RANGE_START, TEST_DATE_RANGE_END - ) + response = self.ab.post_request(TEST_CSV_TYPE, TEST_DATE_RANGE_START, TEST_DATE_RANGE_END) assert response["id"] == TEST_POST_RESPONSE["id"] @requests_mock.Mocker() @@ -77,7 +73,5 @@ def test_successful_get_contributions(self, m): m.post(f"{TEST_URI}/csvs", json=TEST_POST_RESPONSE) m.get(f"{TEST_URI}/csvs/{TEST_ID}", json=TEST_GET_RESPONSE) - table = self.ab.get_contributions( - TEST_CSV_TYPE, TEST_DATE_RANGE_START, TEST_DATE_RANGE_END - ) + table = self.ab.get_contributions(TEST_CSV_TYPE, TEST_DATE_RANGE_START, TEST_DATE_RANGE_END) assert test_columns_data.expected_table_columns == table.columns diff --git a/test/test_action_builder/test_action_builder.py b/test/test_action_builder/test_action_builder.py index 3d6e896a29..d0a71dd2e5 100644 --- a/test/test_action_builder/test_action_builder.py +++ b/test/test_action_builder/test_action_builder.py @@ -203,15 +203,11 @@ def setUp(self, m): self.fake_update_person = { k: v for k, v in self.fake_insert_person.items() if k != "entity_type" } - self.fake_update_person["identifier"] = [ - f"action_builder:{self.fake_entity_id}" - ] + self.fake_update_person["identifier"] = [f"action_builder:{self.fake_entity_id}"] self.fake_tag_id = "fake_tag_id" self.fake_tagging_id = "fake_tagging_id" - self.fake_remove_tag_resp = { - "message": "Tag has been removed from Taggable Logbook" - } + self.fake_remove_tag_resp = {"message": "Tag has been removed from Taggable Logbook"} # self.fake_connection = {"person_id": "fake-entity-id-2"} self.fake_connection = { @@ -225,9 +221,7 @@ def test_get_page(self, m): f"{self.api_url}/tags?page=2&per_page=2", text=json.dumps(self.fake_tags_list_2), ) - self.assertEqual( - self.bldr._get_page(self.campaign, "tags", 2, 2), self.fake_tags_list_2 - ) + self.assertEqual(self.bldr._get_page(self.campaign, "tags", 2, 2), self.fake_tags_list_2) @requests_mock.Mocker() def test_get_all_records(self, m): @@ -262,9 +256,7 @@ def test_get_campaign_tags(self, m): f"{self.api_url}/tags?page=3&per_page=25", text=json.dumps({"_embedded": {"osdi:tags": []}}), ) - assert_matching_tables( - self.bldr.get_campaign_tags(), Table(self.fake_tags_list) - ) + assert_matching_tables(self.bldr.get_campaign_tags(), Table(self.fake_tags_list)) @requests_mock.Mocker() def test_get_tag_by_name(self, m): @@ -286,9 +278,7 @@ def prepare_dict_key_intersection(self, dict1, dict2): # keys whose values are not lists (i.e. nested). 
common_keys = { - key - for key, value in dict1.items() - if key in dict2 and not isinstance(value, list) + key for key, value in dict1.items() if key in dict2 and not isinstance(value, list) } dict1_comp = {key: value for key, value in dict1.items() if key in common_keys} @@ -303,9 +293,7 @@ def test_upsert_entity(self, m): # Flatten and remove items added for spreadable arguments upsert_person = self.fake_upsert_person["person"] - upsert_response = self.bldr._upsert_entity( - self.fake_upsert_person, self.campaign - ) + upsert_response = self.bldr._upsert_entity(self.fake_upsert_person, self.campaign) person_comp, upsert_response_comp = self.prepare_dict_key_intersection( upsert_person, upsert_response @@ -405,9 +393,7 @@ def test_upsert_connection(self, m): f"{self.api_url}/people/{self.fake_entity_id}/connections", json=self.connect_callback, ) - connect_response = self.bldr.upsert_connection( - [self.fake_entity_id, "fake-entity-id-2"] - ) + connect_response = self.bldr.upsert_connection([self.fake_entity_id, "fake-entity-id-2"]) self.assertEqual( connect_response, { diff --git a/test/test_action_kit.py b/test/test_action_kit.py index 8a2c275a10..378d6c55f3 100644 --- a/test/test_action_kit.py +++ b/test/test_action_kit.py @@ -218,9 +218,7 @@ def test_create_event_create_page(self): type(resp_mock.post()).status_code = mock.PropertyMock(return_value=201) self.actionkit.conn = resp_mock - self.actionkit.create_event_create_page( - name="new_page", campaign_id="123", title="title" - ) + self.actionkit.create_event_create_page(name="new_page", campaign_id="123", title="title") self.actionkit.conn.post.assert_called_with( "https://domain.actionkit.com/rest/v1/eventcreatepage/", data=json.dumps( @@ -247,9 +245,7 @@ def test_create_event_create_form(self): type(resp_mock.post()).status_code = mock.PropertyMock(return_value=201) self.actionkit.conn = resp_mock - self.actionkit.create_event_create_form( - page_id="123", thank_you_text="thank you" - ) + self.actionkit.create_event_create_form(page_id="123", thank_you_text="thank you") self.actionkit.conn.post.assert_called_with( "https://domain.actionkit.com/rest/v1/eventcreateform/", data=json.dumps( @@ -272,9 +268,7 @@ def test_create_event_signup_page(self): type(resp_mock.post()).status_code = mock.PropertyMock(return_value=201) self.actionkit.conn = resp_mock - self.actionkit.create_event_signup_page( - name="new_name", campaign_id="123", title="title" - ) + self.actionkit.create_event_signup_page(name="new_name", campaign_id="123", title="title") self.actionkit.conn.post.assert_called_with( "https://domain.actionkit.com/rest/v1/eventsignuppage/", data=json.dumps( @@ -301,14 +295,10 @@ def test_create_event_signup_form(self): type(resp_mock.post()).status_code = mock.PropertyMock(return_value=201) self.actionkit.conn = resp_mock - self.actionkit.create_event_signup_form( - page_id="123", thank_you_text="thank you" - ) + self.actionkit.create_event_signup_form(page_id="123", thank_you_text="thank you") self.actionkit.conn.post.assert_called_with( "https://domain.actionkit.com/rest/v1/eventsignupform/", - data=json.dumps( - {"page": "/rest/v1/page/123/", "thank_you_text": "thank you"} - ), + data=json.dumps({"page": "/rest/v1/page/123/", "thank_you_text": "thank you"}), ) def test_update_event_signup(self): @@ -696,16 +686,12 @@ def test_table_split(self): assert_matching_tables(tables[0], Table([("x", "y"), ("a", "b")])) assert_matching_tables(tables[1], Table([("x", "z"), ("1", "3"), ("4", "6")])) - test2 = Table( - [("x", "y", "z"), 
("a", "b", "c"), ("1", "2", "3"), ("4", "5", "6")] - ) + test2 = Table([("x", "y", "z"), ("a", "b", "c"), ("1", "2", "3"), ("4", "5", "6")]) tables2 = self.actionkit._split_tables_no_empties(test2, True, []) self.assertEqual(len(tables2), 1) assert_matching_tables(tables2[0], test2) - test3 = Table( - [("x", "y", "z"), ("a", "b", ""), ("1", "2", "3"), ("4", "5", "6")] - ) + test3 = Table([("x", "y", "z"), ("a", "b", ""), ("1", "2", "3"), ("4", "5", "6")]) tables3 = self.actionkit._split_tables_no_empties(test3, False, ["z"]) self.assertEqual(len(tables3), 2) assert_matching_tables(tables3[0], Table([("x", "y"), ("a", "b")])) diff --git a/test/test_action_network/test_action_network.py b/test/test_action_network/test_action_network.py index f1c009bbad..3dfc2e9b3d 100644 --- a/test/test_action_network/test_action_network.py +++ b/test/test_action_network/test_action_network.py @@ -430,31 +430,17 @@ def setUp(self, m): "languages_spoken": ["en"], "_links": { "self": {"href": f"{self.api_url}/people/fake_url"}, - "osdi:attendances": { - "href": f"{self.api_url}/people/fake_url" - }, - "osdi:signatures": { - "href": f"{self.api_url}/people/fake_url" - }, - "osdi:submissions": { - "href": f"{self.api_url}/people/fake_url" - }, - "osdi:donations": { - "href": f"{self.api_url}/people/fake_url" - }, - "osdi:outreaches": { - "href": f"{self.api_url}/people/fake_url" - }, - "osdi:taggings": { - "href": f"{self.api_url}/people/fake_url" - }, + "osdi:attendances": {"href": f"{self.api_url}/people/fake_url"}, + "osdi:signatures": {"href": f"{self.api_url}/people/fake_url"}, + "osdi:submissions": {"href": f"{self.api_url}/people/fake_url"}, + "osdi:donations": {"href": f"{self.api_url}/people/fake_url"}, + "osdi:outreaches": {"href": f"{self.api_url}/people/fake_url"}, + "osdi:taggings": {"href": f"{self.api_url}/people/fake_url"}, }, } }, "_links": { - "self": { - "href": f"{self.api_url}/advocacy_campaigns/fake_url" - }, + "self": {"href": f"{self.api_url}/advocacy_campaigns/fake_url"}, "osdi:outreaches": { "href": f"{self.api_url}/advocacy_campaigns/fake_url" }, @@ -520,31 +506,17 @@ def setUp(self, m): "languages_spoken": ["en"], "_links": { "self": {"href": f"{self.api_url}/people/fake_url"}, - "osdi:attendances": { - "href": f"{self.api_url}/people/fake_url" - }, - "osdi:signatures": { - "href": f"{self.api_url}/people/fake_url" - }, - "osdi:submissions": { - "href": f"{self.api_url}/people/fake_url" - }, - "osdi:donations": { - "href": f"{self.api_url}/people/fake_url" - }, - "osdi:outreaches": { - "href": f"{self.api_url}/people/fake_url" - }, - "osdi:taggings": { - "href": f"{self.api_url}/people/fake_url" - }, + "osdi:attendances": {"href": f"{self.api_url}/people/fake_url"}, + "osdi:signatures": {"href": f"{self.api_url}/people/fake_url"}, + "osdi:submissions": {"href": f"{self.api_url}/people/fake_url"}, + "osdi:donations": {"href": f"{self.api_url}/people/fake_url"}, + "osdi:outreaches": {"href": f"{self.api_url}/people/fake_url"}, + "osdi:taggings": {"href": f"{self.api_url}/people/fake_url"}, }, } }, "_links": { - "self": { - "href": f"{self.api_url}/advocacy_campaigns/fake_url" - }, + "self": {"href": f"{self.api_url}/advocacy_campaigns/fake_url"}, "osdi:outreaches": { "href": f"{self.api_url}/advocacy_campaigns/fake_url" }, @@ -560,9 +532,7 @@ def setUp(self, m): { "created_date": "2021-01-06T21:02:39Z", "modified_date": "2021-01-11T19:34:59Z", - "identifiers": [ - "action_network:44618be7-29cb-439e-bc68-70e6e85dda1b" - ], + "identifiers": 
["action_network:44618be7-29cb-439e-bc68-70e6e85dda1b"], "origin_system": "Action Network", "name": "Call your elected officials", "title": "Call your elected officials", @@ -583,9 +553,7 @@ def setUp(self, m): "templated": True, }, ], - "self": { - "href": f"{self.api_url}/advocacy_campaigns/fake_url" - }, + "self": {"href": f"{self.api_url}/advocacy_campaigns/fake_url"}, "osdi:outreaches": { "href": f"{self.api_url}/advocacy_campaigns/fake_url" }, @@ -651,24 +619,12 @@ def setUp(self, m): "languages_spoken": ["en"], "_links": { "self": {"href": f"{self.api_url}/people/fake_id"}, - "osdi:attendances": { - "href": f"{self.api_url}/people/fake_id/attendances" - }, - "osdi:signatures": { - "href": f"{self.api_url}/people/fake_id/signatures" - }, - "osdi:submissions": { - "href": f"{self.api_url}/people/fake_id/submissions" - }, - "osdi:donations": { - "href": f"{self.api_url}/people/fake_id/donations" - }, - "osdi:outreaches": { - "href": f"{self.api_url}/people/fake_id/outreaches" - }, - "osdi:taggings": { - "href": f"{self.api_url}/people/fake_id/taggings" - }, + "osdi:attendances": {"href": f"{self.api_url}/people/fake_id/attendances"}, + "osdi:signatures": {"href": f"{self.api_url}/people/fake_id/signatures"}, + "osdi:submissions": {"href": f"{self.api_url}/people/fake_id/submissions"}, + "osdi:donations": {"href": f"{self.api_url}/people/fake_id/donations"}, + "osdi:outreaches": {"href": f"{self.api_url}/people/fake_id/outreaches"}, + "osdi:taggings": {"href": f"{self.api_url}/people/fake_id/taggings"}, "curies": [ { "name": "osdi", @@ -739,18 +695,14 @@ def setUp(self, m): "_embedded": { "osdi:attendances": [ { - "identifiers": [ - "action_network:d51ca19e-9fe9-11e3-a2e9-12313d316c29" - ], + "identifiers": ["action_network:d51ca19e-9fe9-11e3-a2e9-12313d316c29"], "created_date": "2014-02-18T20:52:59Z", "modified_date": "2014-02-18T20:53:00Z", "status": "accepted", "action_network:person_id": "fake_id", "action_network:event_id": "fake_id", "_links": { - "self": { - "href": f"{self.api_url}/events/fake_id/attendances/fake_id" - }, + "self": {"href": f"{self.api_url}/events/fake_id/attendances/fake_id"}, "osdi:event": {"href": f"{self.api_url}/events/fake_id"}, "osdi:person": {"href": f"{self.api_url}/people/fake_id"}, }, @@ -763,9 +715,7 @@ def setUp(self, m): "action_network:person_id": "fake_id", "action_network:event_id": "fake_id", "_links": { - "self": { - "href": f"{self.api_url}/events/fake_id/attendances/fake_id" - }, + "self": {"href": f"{self.api_url}/events/fake_id/attendances/fake_id"}, "osdi:event": {"href": f"{self.api_url}/events/fake_id"}, "osdi:person": {"href": f"{self.api_url}/people/fake_id"}, }, @@ -852,9 +802,7 @@ def setUp(self, m): "browser_url": "fake_url", }, ], - "_links": { - "self": {"href": f"{self.api_url}/campaigns/fake_id"} - }, + "_links": {"self": {"href": f"{self.api_url}/campaigns/fake_id"}}, }, { "identifiers": ["action_network:fake_id"], @@ -879,9 +827,7 @@ def setUp(self, m): "browser_url": "fake_url", }, ], - "_links": { - "self": {"href": f"{self.api_url}/campaigns/fake_id"} - }, + "_links": {"self": {"href": f"{self.api_url}/campaigns/fake_id"}}, }, ] }, @@ -934,11 +880,7 @@ def setUp(self, m): "name": "Custom Fields", "description": "The collection of custom fields available at this endpoint.", "_links": { - "self": [ - { - "href": "https://dev.actionnetwork.org/api/v2/metadata/custom_fields" - } - ], + "self": [{"href": "https://dev.actionnetwork.org/api/v2/metadata/custom_fields"}], "curies": [ { "name": "osdi", @@ -989,12 +931,8 @@ def 
setUp(self, m): "_links": { "self": {"href": f"{self.api_url}/fundraising_pages/fake_id/donations"}, "osdi:donations": [ - { - "href": f"{self.api_url}/fundraising_pages/fake_id/donations/fake_id" - }, - { - "href": f"{self.api_url}/fundraising_pages/fake_id/donations/fake_id" - }, + {"href": f"{self.api_url}/fundraising_pages/fake_id/donations/fake_id"}, + {"href": f"{self.api_url}/fundraising_pages/fake_id/donations/fake_id"}, ], "curies": [ { @@ -1012,9 +950,7 @@ def setUp(self, m): "_embedded": { "osdi:donations": [ { - "identifiers": [ - "action_network:f1119c4e-b8ca-44ff-bfa7-f78f7ca3ec16" - ], + "identifiers": ["action_network:f1119c4e-b8ca-44ff-bfa7-f78f7ca3ec16"], "created_date": "2014-03-27T17:42:21Z", "modified_date": "2014-03-27T17:42:24Z", "currency": "USD", @@ -1039,19 +975,13 @@ def setUp(self, m): "action_network:person_id": "fake_id", "action_network:fundraising_page_id": "fake_id", "_links": { - "self": { - "href": f"{self.api_url}/fundraising_pages/fake_url" - }, - "osdi:fundraising_page": { - "href": f"{self.api_url}/fake_url" - }, + "self": {"href": f"{self.api_url}/fundraising_pages/fake_url"}, + "osdi:fundraising_page": {"href": f"{self.api_url}/fake_url"}, "osdi:person": {"href": f"{self.api_url}/people/fake_url"}, }, }, { - "identifiers": [ - "action_network:d86538c1-e8f7-46e1-8320-552da81bd48d" - ], + "identifiers": ["action_network:d86538c1-e8f7-46e1-8320-552da81bd48d"], "created_date": "2014-03-27T17:40:56Z", "modified_date": "2014-03-27T17:41:11Z", "currency": "USD", @@ -1072,9 +1002,7 @@ def setUp(self, m): "action_network:person_id": "fake_id", "action_network:fundraising_page_id": "fake_id", "_links": { - "self": { - "href": "fundraising_pages/fake_id/donations/fake_id" - }, + "self": {"href": "fundraising_pages/fake_id/donations/fake_id"}, "osdi:fundraising_page": { "href": f"{self.api_url}/fundraising_pages/fake_id" }, @@ -1104,12 +1032,8 @@ def setUp(self, m): "action_network:person_id": "fake_id", "action_network:fundraising_page_id": "fake_id", "_links": { - "self": { - "href": f"{self.api_url}/fundraising_pages/fake_id/donations/fake_id" - }, - "osdi:fundraising_page": { - "href": f"{self.api_url}/fundraising_pages/fake_id" - }, + "self": {"href": f"{self.api_url}/fundraising_pages/fake_id/donations/fake_id"}, + "osdi:fundraising_page": {"href": f"{self.api_url}/fundraising_pages/fake_id"}, "osdi:person": {"href": f"{self.api_url}/people/fake_id"}, "curies": [ { @@ -1298,12 +1222,8 @@ def setUp(self, m): }, "_links": { "self": {"href": f"{self.api_url}/event_campaigns/fake_id"}, - "osdi:events": { - "href": f"{self.api_url}/event_campaigns/fake_id/events" - }, - "action_network:embed": { - "href": f"{self.api_url}/event_campaigns/fake_id/embed" - }, + "osdi:events": {"href": f"{self.api_url}/event_campaigns/fake_id/events"}, + "action_network:embed": {"href": f"{self.api_url}/event_campaigns/fake_id/embed"}, "curies": [ { "name": "osdi", @@ -1386,9 +1306,7 @@ def setUp(self, m): "osdi:record_attendance_helper": { "href": f"{self.api_url}/events/fake_id/attendances" }, - "osdi:organizer": { - "href": f"{self.api_url}/people/fake_id" - }, + "osdi:organizer": {"href": f"{self.api_url}/people/fake_id"}, "osdi:creator": {"href": f"{self.api_url}/people/fake_id"}, "action_network:embed": { "href": f"{self.api_url}/events/fake_id/embed" @@ -1439,9 +1357,7 @@ def setUp(self, m): "osdi:record_attendance_helper": { "href": f"{self.api_url}/events/fake_id/attendances" }, - "osdi:organizer": { - "href": f"{self.api_url}/people/fake_id" - }, + "osdi:organizer": 
{"href": f"{self.api_url}/people/fake_id"}, "osdi:creator": {"href": f"{self.api_url}/people/fake_id"}, "action_network:embed": { "href": f"{self.api_url}/events/fake_id/embed" @@ -1484,17 +1400,13 @@ def setUp(self, m): }, "_links": { "self": {"href": f"{self.api_url}/events/fake_id"}, - "osdi:attendances": { - "href": f"{self.api_url}/events/fake_id/attendances" - }, + "osdi:attendances": {"href": f"{self.api_url}/events/fake_id/attendances"}, "osdi:record_attendance_helper": { "href": f"{self.api_url}/events/fake_id/attendances" }, "osdi:organizer": {"href": f"{self.api_url}/people/fake_id"}, "osdi:creator": {"href": f"{self.api_url}/people/fake_id"}, - "action_network:embed": { - "href": f"{self.api_url}/events/fake_id/embed" - }, + "action_network:embed": {"href": f"{self.api_url}/events/fake_id/embed"}, "curies": [ { "name": "osdi", @@ -1520,9 +1432,7 @@ def setUp(self, m): "next": {"href": f"{self.api_url}/forms?page=2"}, "self": {"href": f"{self.api_url}/forms"}, "osdi:forms": [ - { - "href": f"{self.api_url}/forms/65345d7d-cd24-466a-a698-4a7686ef684f" - }, + {"href": f"{self.api_url}/forms/65345d7d-cd24-466a-a698-4a7686ef684f"}, {"href": f"{self.api_url}/forms/fake_id"}, ], "curies": [ @@ -1622,9 +1532,7 @@ def setUp(self, m): "href": f"{self.api_url}/forms/fake_id/submissions" }, "osdi:creator": {"href": f"{self.api_url}/people/fake_id"}, - "action_network:embed": { - "href": f"{self.api_url}/forms/fake_id/embed" - }, + "action_network:embed": {"href": f"{self.api_url}/forms/fake_id/embed"}, }, }, { @@ -1709,9 +1617,7 @@ def setUp(self, m): "href": f"{self.api_url}/forms/fake_id/submissions" }, "osdi:creator": {"href": f"{self.api_url}/people/fake_id"}, - "action_network:embed": { - "href": f"{self.api_url}/forms/fake_id/embed" - }, + "action_network:embed": {"href": f"{self.api_url}/forms/fake_id/embed"}, }, }, ] @@ -1769,24 +1675,12 @@ def setUp(self, m): "languages_spoken": ["en"], "_links": { "self": {"href": f"{self.api_url}/people/fake_id"}, - "osdi:attendances": { - "href": f"{self.api_url}/people/fake_id/attendances" - }, - "osdi:signatures": { - "href": f"{self.api_url}/people/fake_id/signatures" - }, - "osdi:submissions": { - "href": f"{self.api_url}/people/fake_id/submissions" - }, - "osdi:donations": { - "href": f"{self.api_url}/people/fake_id/donations" - }, - "osdi:outreaches": { - "href": f"{self.api_url}/people/fake_id/outreaches" - }, - "osdi:taggings": { - "href": f"{self.api_url}/people/fake_id/taggings" - }, + "osdi:attendances": {"href": f"{self.api_url}/people/fake_id/attendances"}, + "osdi:signatures": {"href": f"{self.api_url}/people/fake_id/signatures"}, + "osdi:submissions": {"href": f"{self.api_url}/people/fake_id/submissions"}, + "osdi:donations": {"href": f"{self.api_url}/people/fake_id/donations"}, + "osdi:outreaches": {"href": f"{self.api_url}/people/fake_id/outreaches"}, + "osdi:taggings": {"href": f"{self.api_url}/people/fake_id/taggings"}, "curies": [ { "name": "osdi", @@ -1804,9 +1698,7 @@ def setUp(self, m): }, "_links": { "self": {"href": f"{self.api_url}/forms/fake_id"}, - "osdi:submissions": { - "href": f"{self.api_url}/forms/fake_id/submissions" - }, + "osdi:submissions": {"href": f"{self.api_url}/forms/fake_id/submissions"}, "osdi:record_submission_helper": { "href": f"{self.api_url}/forms/fake_id/submissions" }, @@ -1921,24 +1813,12 @@ def setUp(self, m): "languages_spoken": ["en"], "_links": { "self": {"href": f"{self.api_url}/people/fake_id"}, - "osdi:attendances": { - "href": f"{self.api_url}/people/fake_id/attendances" - }, - 
"osdi:signatures": { - "href": f"{self.api_url}/people/fake_id/signatures" - }, - "osdi:submissions": { - "href": f"{self.api_url}/people/fake_id/submissions" - }, - "osdi:donations": { - "href": f"{self.api_url}/people/fake_id/donations" - }, - "osdi:outreaches": { - "href": f"{self.api_url}/people/fake_id/outreaches" - }, - "osdi:taggings": { - "href": f"{self.api_url}/people/fake_id/taggings" - }, + "osdi:attendances": {"href": f"{self.api_url}/people/fake_id/attendances"}, + "osdi:signatures": {"href": f"{self.api_url}/people/fake_id/signatures"}, + "osdi:submissions": {"href": f"{self.api_url}/people/fake_id/submissions"}, + "osdi:donations": {"href": f"{self.api_url}/people/fake_id/donations"}, + "osdi:outreaches": {"href": f"{self.api_url}/people/fake_id/outreaches"}, + "osdi:taggings": {"href": f"{self.api_url}/people/fake_id/taggings"}, "curies": [ { "name": "osdi", @@ -1960,16 +1840,12 @@ def setUp(self, m): }, "_links": { "self": {"href": f"{self.api_url}/fundraising_pages/fake_id"}, - "osdi:donations": { - "href": f"{self.api_url}/fundraising_pages/fake_id/donations" - }, + "osdi:donations": {"href": f"{self.api_url}/fundraising_pages/fake_id/donations"}, "osdi:record_donation_helper": { "href": f"{self.api_url}/fundraising_pages/fake_id/donations" }, "osdi:creator": {"href": f"{self.api_url}/people/fake_id"}, - "action_network:embed": { - "href": f"{self.api_url}/fundraising_pages/fake_id/embed" - }, + "action_network:embed": {"href": f"{self.api_url}/fundraising_pages/fake_id/embed"}, "curies": [ { "name": "osdi", @@ -2013,9 +1889,7 @@ def setUp(self, m): "osdi:items": [ { "_links": { - "self": { - "href": f"{self.api_url}/lists/fake_id/items/fake_id" - }, + "self": {"href": f"{self.api_url}/lists/fake_id/items/fake_id"}, "osdi:list": {"href": f"{self.api_url}/lists/fake_id"}, "osdi:person": {"href": f"{self.api_url}/people/fake_id"}, }, @@ -2028,9 +1902,7 @@ def setUp(self, m): }, { "_links": { - "self": { - "href": f"{self.api_url}/lists/fake_id/items/fake_id" - }, + "self": {"href": f"{self.api_url}/lists/fake_id/items/fake_id"}, "osdi:list": {"href": f"{self.api_url}/lists/fake_id"}, "osdi:person": {"href": f"{self.api_url}/people/fake_id"}, }, @@ -2108,9 +1980,7 @@ def setUp(self, m): "browser_url": "fake_url", "_links": { "self": {"href": f"{self.api_url}/lists/fake_id"}, - "osdi:items": { - "href": f"{self.api_url}/lists/fake_id/items" - }, + "osdi:items": {"href": f"{self.api_url}/lists/fake_id/items"}, }, }, { @@ -2123,9 +1993,7 @@ def setUp(self, m): "browser_url": "fake_url", "_links": { "self": {"href": f"{self.api_url}/lists/fake_id"}, - "osdi:items": { - "href": f"{self.api_url}/lists/fake_id/items" - }, + "osdi:items": {"href": f"{self.api_url}/lists/fake_id/items"}, }, }, ] @@ -2213,15 +2081,11 @@ def setUp(self, m): }, "_links": { "self": {"href": f"{self.api_url}/messages/fake_id"}, - "osdi:wrapper": { - "href": f"{self.api_url}/wrappers/fake_id" - }, + "osdi:wrapper": {"href": f"{self.api_url}/wrappers/fake_id"}, "osdi:recipients": { "href": f"{self.api_url}/lists/950e9954-606f-43e6-be99-2bc0bc2072a1" }, - "osdi:send_helper": { - "href": f"{self.api_url}/messages/fake_id/send" - }, + "osdi:send_helper": {"href": f"{self.api_url}/messages/fake_id/send"}, "osdi:schedule_helper": { "href": f"{self.api_url}/messages/fake_id/schedule" }, @@ -2247,9 +2111,7 @@ def setUp(self, m): "targets": [], "_links": { "self": {"href": f"{self.api_url}/messages/fake_id"}, - "osdi:send_helper": { - "href": f"{self.api_url}/messages/fake_id/send" - }, + "osdi:send_helper": 
{"href": f"{self.api_url}/messages/fake_id/send"}, "osdi:schedule_helper": { "href": f"{self.api_url}/messages/fake_id/schedule" }, @@ -2291,9 +2153,7 @@ def setUp(self, m): "href": f"{self.api_url}/lists/950e9954-606f-43e6-be99-2bc0bc2072a1" }, "osdi:send_helper": {"href": f"{self.api_url}/messages/fake_id/send"}, - "osdi:schedule_helper": { - "href": f"{self.api_url}/messages/fake_id/schedule" - }, + "osdi:schedule_helper": {"href": f"{self.api_url}/messages/fake_id/schedule"}, }, } @@ -2327,16 +2187,10 @@ def setUp(self, m): "page": 1, "total_records": 6, "_links": { - "self": { - "href": f"{self.api_url}/advocacy_campaigns/fake_id/outreaches" - }, + "self": {"href": f"{self.api_url}/advocacy_campaigns/fake_id/outreaches"}, "osdi:outreaches": [ - { - "href": f"{self.api_url}/advocacy_campaigns/fake_id/outreaches/fake_id" - }, - { - "href": f"{self.api_url}/advocacy_campaigns/fake_id/outreaches/dfake_id" - }, + {"href": f"{self.api_url}/advocacy_campaigns/fake_id/outreaches/fake_id"}, + {"href": f"{self.api_url}/advocacy_campaigns/fake_id/outreaches/dfake_id"}, ], "curies": [ { @@ -2354,9 +2208,7 @@ def setUp(self, m): "_embedded": { "osdi:outreaches": [ { - "identifiers": [ - "action_network:f1119c4e-b8ca-44ff-bfa7-f78f7ca3ec16" - ], + "identifiers": ["action_network:f1119c4e-b8ca-44ff-bfa7-f78f7ca3ec16"], "created_date": "2014-03-27T17:42:21Z", "modified_date": "2014-03-27T17:42:24Z", "type": "email", @@ -2373,9 +2225,7 @@ def setUp(self, m): "action_network:person_id": "fake_id", "action_network:advocacy_campaign_id": "fake_id", "_links": { - "self": { - "href": "/advocacy_campaigns/fake_id/outreaches/fake_id" - }, + "self": {"href": "/advocacy_campaigns/fake_id/outreaches/fake_id"}, "osdi:advocacy_campaign": { "href": f"{self.api_url}/advocacy_campaigns/fake_id" }, @@ -2383,9 +2233,7 @@ def setUp(self, m): }, }, { - "identifiers": [ - "action_network:d86538c1-e8f7-46e1-8320-552da81bd48d" - ], + "identifiers": ["action_network:d86538c1-e8f7-46e1-8320-552da81bd48d"], "created_date": "2014-03-27T17:40:56Z", "modified_date": "2014-03-27T17:41:11Z", "type": "email", @@ -2402,9 +2250,7 @@ def setUp(self, m): "action_network:person_id": "fake_id", "action_network:advocacy_campaign_id": "fake_id", "_links": { - "self": { - "href": "advocacy_campaigns/fake_id/outreaches/fake_id" - }, + "self": {"href": "advocacy_campaigns/fake_id/outreaches/fake_id"}, "osdi:advocacy_campaign": { "href": f"{self.api_url}/advocacy_campaigns/fake_id" }, @@ -2432,12 +2278,8 @@ def setUp(self, m): "action_network:person_id": "fake_id", "action_network:advocacy_campaign_id": "fake_id", "_links": { - "self": { - "href": f"{self.api_url}/fundraising_page/fake_id/outreaches/fake_id" - }, - "osdi:advocacy_campaign": { - "href": f"{self.api_url}/advocacy_campaigns/fake_id" - }, + "self": {"href": f"{self.api_url}/fundraising_page/fake_id/outreaches/fake_id"}, + "osdi:advocacy_campaign": {"href": f"{self.api_url}/advocacy_campaigns/fake_id"}, "osdi:person": {"href": f"{self.api_url}/people/fake_id"}, "curies": [ { @@ -2536,15 +2378,11 @@ def setUp(self, m): "osdi:submissions": { "href": f"{self.api_url}/people/fake_id/submissions" }, - "osdi:donations": { - "href": f"{self.api_url}/people/fake_id/donations" - }, + "osdi:donations": {"href": f"{self.api_url}/people/fake_id/donations"}, "osdi:outreaches": { "href": f"{self.api_url}/people/fake_id/outreaches" }, - "osdi:taggings": { - "href": f"{self.api_url}/people/fake_id/taggings" - }, + "osdi:taggings": {"href": f"{self.api_url}/people/fake_id/taggings"}, }, }, { @@ 
-2595,15 +2433,11 @@ def setUp(self, m): "osdi:submissions": { "href": f"{self.api_url}/people/fake_id/submissions" }, - "osdi:donations": { - "href": f"{self.api_url}/people/fake_id/donations" - }, + "osdi:donations": {"href": f"{self.api_url}/people/fake_id/donations"}, "osdi:outreaches": { "href": f"{self.api_url}/people/fake_id/outreaches" }, - "osdi:taggings": { - "href": f"{self.api_url}/people/fake_id/taggings" - }, + "osdi:taggings": {"href": f"{self.api_url}/people/fake_id/taggings"}, }, }, ] @@ -2876,24 +2710,12 @@ def setUp(self, m): "languages_spoken": ["en"], "_links": { "self": {"href": f"{self.api_url}/people/fake_id"}, - "osdi:attendances": { - "href": f"{self.api_url}/people/fake_id/attendances" - }, - "osdi:signatures": { - "href": f"{self.api_url}/people/fake_id/signatures" - }, - "osdi:submissions": { - "href": f"{self.api_url}/people/fake_id/submissions" - }, - "osdi:donations": { - "href": f"{self.api_url}/people/fake_id/donations" - }, - "osdi:outreaches": { - "href": f"{self.api_url}/people/fake_id/outreaches" - }, - "osdi:taggings": { - "href": f"{self.api_url}/people/fake_id/taggings" - }, + "osdi:attendances": {"href": f"{self.api_url}/people/fake_id/attendances"}, + "osdi:signatures": {"href": f"{self.api_url}/people/fake_id/signatures"}, + "osdi:submissions": {"href": f"{self.api_url}/people/fake_id/submissions"}, + "osdi:donations": {"href": f"{self.api_url}/people/fake_id/donations"}, + "osdi:outreaches": {"href": f"{self.api_url}/people/fake_id/outreaches"}, + "osdi:taggings": {"href": f"{self.api_url}/people/fake_id/taggings"}, "curies": [ { "name": "osdi", @@ -2911,16 +2733,12 @@ def setUp(self, m): }, "_links": { "self": {"href": f"{self.api_url}/petitions/fake_id"}, - "osdi:signatures": { - "href": f"{self.api_url}/petitions/fake_id/signatures" - }, + "osdi:signatures": {"href": f"{self.api_url}/petitions/fake_id/signatures"}, "osdi:record_signature_helper": { "href": f"{self.api_url}/petitions/fake_id/signatures" }, "osdi:creator": {"href": f"{self.api_url}/people/fake_id"}, - "action_network:embed": { - "href": f"{self.api_url}/petitions/fake_id/embed" - }, + "action_network:embed": {"href": f"{self.api_url}/petitions/fake_id/embed"}, "curies": [ { "name": "osdi", @@ -3046,16 +2864,12 @@ def setUp(self, m): "self": { "href": f"{self.api_url}/petitions/fake_id/signatures/fake_id" }, - "osdi:petition": { - "href": f"{self.api_url}/petitions/fake_id" - }, + "osdi:petition": {"href": f"{self.api_url}/petitions/fake_id"}, "osdi:person": {"href": f"{self.api_url}/people/fake_id"}, }, }, { - "identifiers": [ - "action_network:71497ab2-b3e7-4896-af46-126ac7287dab" - ], + "identifiers": ["action_network:71497ab2-b3e7-4896-af46-126ac7287dab"], "created_date": "2014-03-26T16:07:10Z", "modified_date": "2014-03-26T16:07:10Z", "comments": "Stop doing the thing", @@ -3065,9 +2879,7 @@ def setUp(self, m): "self": { "href": f"{self.api_url}/petitions/fake_id/signatures/fake_id" }, - "osdi:petition": { - "href": f"{self.api_url}/petitions/fake_id" - }, + "osdi:petition": {"href": f"{self.api_url}/petitions/fake_id"}, "osdi:person": {"href": f"{self.api_url}/people/fake_id"}, }, }, @@ -3082,9 +2894,7 @@ def setUp(self, m): "action_network:petition_id": "fake_id", "comments": "Stop doing the thing", "_links": { - "self": { - "href": f"{self.api_url}/petitions/fake_id/signatures/fake_id" - }, + "self": {"href": f"{self.api_url}/petitions/fake_id/signatures/fake_id"}, "osdi:petition": {"href": f"{self.api_url}/petitions/fake_id"}, "osdi:person": { "href": 
f"{self.api_url}/people/699da712-929f-11e3-a2e9-12313d316c29" @@ -3138,9 +2948,7 @@ def setUp(self, m): "action:person_id": "fake_id", "action_network:form_id": "fake_id", "_links": { - "self": { - "href": f"{self.api_url}/forms/fake_id/submissions/fake_id" - }, + "self": {"href": f"{self.api_url}/forms/fake_id/submissions/fake_id"}, "osdi:form": {"href": f"{self.api_url}/forms/fake_id"}, "osdi:person": {"href": f"{self.api_url}/people/fake_id"}, }, @@ -3155,9 +2963,7 @@ def setUp(self, m): "action:person_id": "fake_id", "action_network:form_id": "fake_id", "_links": { - "self": { - "href": f"{self.api_url}/forms/fake_id/submissions/fake_id" - }, + "self": {"href": f"{self.api_url}/forms/fake_id/submissions/fake_id"}, "osdi:form": {"href": f"{self.api_url}/forms/fake_id"}, "osdi:person": {"href": f"{self.api_url}/people/fake_id"}, }, @@ -3225,9 +3031,7 @@ def setUp(self, m): "name": "Volunteers", "_links": { "self": {"href": f"{self.api_url}/tags/fake_id"}, - "osdi:taggings": { - "href": f"{self.api_url}/tags/fake_id/taggings" - }, + "osdi:taggings": {"href": f"{self.api_url}/tags/fake_id/taggings"}, }, }, { @@ -3237,9 +3041,7 @@ def setUp(self, m): "name": "Economic Justice", "_links": { "self": {"href": f"{self.api_url}/tags/fake_id"}, - "osdi:taggings": { - "href": f"{self.api_url}/tags/fake_id/taggings" - }, + "osdi:taggings": {"href": f"{self.api_url}/tags/fake_id/taggings"}, }, }, ] @@ -3276,9 +3078,7 @@ def setUp(self, m): "osdi:taggings": [ { "_links": { - "self": { - "href": f"{self.api_url}/tags/fake_id/taggings/fake_id" - }, + "self": {"href": f"{self.api_url}/tags/fake_id/taggings/fake_id"}, "osdi:tag": {"href": f"{self.api_url}/tags/fake_id"}, "osdi:person": {"href": f"{self.api_url}/people/fake_id"}, }, @@ -3289,9 +3089,7 @@ def setUp(self, m): }, { "_links": { - "self": { - "href": f"{self.api_url}/tags/fake_id/taggings/fake_id" - }, + "self": {"href": f"{self.api_url}/tags/fake_id/taggings/fake_id"}, "osdi:tag": {"href": f"{self.api_url}/tags/fake_id"}, "osdi:person": {"href": f"{self.api_url}/people/fake_id"}, }, @@ -3385,9 +3183,7 @@ def setUp(self, m): "action_network:suffix": " via ProgressivePower.org", "wrapper_type": "email", "default": True, - "_links": { - "self": {"href": f"{self.api_url}/wrappers/fake_id"} - }, + "_links": {"self": {"href": f"{self.api_url}/wrappers/fake_id"}}, }, { "identifiers": ["action_network:fake_id"], @@ -3416,9 +3212,7 @@ def setUp(self, m): " \r\n", "wrapper_type": "email", "default": False, - "_links": { - "self": {"href": f"{self.api_url}/wrappers/fake_id"} - }, + "_links": {"self": {"href": f"{self.api_url}/wrappers/fake_id"}}, }, ] }, @@ -3491,9 +3285,7 @@ def test_get_entry_list(self, m): f"{self.api_url}/people?page=3&per_page=25", text=json.dumps({"_embedded": {"osdi:people": []}}), ) - assert_matching_tables( - self.an._get_entry_list("people"), Table(self.fake_people_list) - ) + assert_matching_tables(self.an._get_entry_list("people"), Table(self.fake_people_list)) @requests_mock.Mocker() def test_filter_get_people(self, m): @@ -3583,9 +3375,7 @@ def test_get_person_attendances(self, m): ) assert_matching_tables( self.an.get_person_attendances("123", 1), - self.fake_attendances["_embedded"][ - list(self.fake_attendances["_embedded"])[0] - ], + self.fake_attendances["_embedded"][list(self.fake_attendances["_embedded"])[0]], ) @requests_mock.Mocker() @@ -3596,9 +3386,7 @@ def test_get_event_attendances(self, m): ) assert_matching_tables( self.an.get_event_attendances("123", 1), - self.fake_attendances["_embedded"][ - 
list(self.fake_attendances["_embedded"])[0] - ], + self.fake_attendances["_embedded"][list(self.fake_attendances["_embedded"])[0]], ) @requests_mock.Mocker() @@ -3705,9 +3493,7 @@ def test_get_fundraising_page_donations(self, m): @requests_mock.Mocker() def test_get_person_donations(self, m): - m.get( - f"{self.api_url}/people/123/donations", text=json.dumps(self.fake_donations) - ) + m.get(f"{self.api_url}/people/123/donations", text=json.dumps(self.fake_donations)) assert_matching_tables( self.an.get_person_donations("123", 1), self.fake_donations["_embedded"][list(self.fake_donations["_embedded"])[0]], @@ -3739,9 +3525,7 @@ def test_get_event_campaigns(self, m): ) assert_matching_tables( self.an.get_event_campaigns(1), - self.fake_event_campaigns["_embedded"][ - list(self.fake_event_campaigns["_embedded"])[0] - ], + self.fake_event_campaigns["_embedded"][list(self.fake_event_campaigns["_embedded"])[0]], ) @requests_mock.Mocker() @@ -3758,9 +3542,7 @@ def test_get_event_campaign(self, m): @requests_mock.Mocker() def test_create_event_campaign(self, m): payload = {"title": "Canvassing Events", "origin_system": "AmyforTexas.com"} - m.post( - f"{self.api_url}/event_campaigns", text=json.dumps(self.fake_event_campaign) - ) + m.post(f"{self.api_url}/event_campaigns", text=json.dumps(self.fake_event_campaign)) self.assertEqual( self.fake_event_campaign, self.an.create_event_campaign(payload), @@ -3993,11 +3775,7 @@ def test_create_message(self, m): "from": "Progressive Action Now", "reply_to": "jane@progressiveactionnow.org", "targets": [{"href": "https://actionnetwork.org/api/v2/queries/123"}], - "_links": { - "osdi:wrapper": { - "href": "https://actionnetwork.org/api/v2/wrappers/123" - } - }, + "_links": {"osdi:wrapper": {"href": "https://actionnetwork.org/api/v2/wrappers/123"}}, } m.post(f"{self.api_url}/messages", text=json.dumps(self.fake_message)) assert_matching_tables( @@ -4036,9 +3814,7 @@ def test_get_advocacy_campaign_outreaches(self, m): ) assert_matching_tables( self.an.get_advocacy_campaign_outreaches("123", 1), - self.fake_outreaches["_embedded"][ - list(self.fake_outreaches["_embedded"])[0] - ], + self.fake_outreaches["_embedded"][list(self.fake_outreaches["_embedded"])[0]], ) @requests_mock.Mocker() @@ -4049,9 +3825,7 @@ def test_get_person_outreaches(self, m): ) assert_matching_tables( self.an.get_person_outreaches("123", 1), - self.fake_outreaches["_embedded"][ - list(self.fake_outreaches["_embedded"])[0] - ], + self.fake_outreaches["_embedded"][list(self.fake_outreaches["_embedded"])[0]], ) @requests_mock.Mocker() @@ -4080,9 +3854,7 @@ def test_get_person_outreach(self, m): def test_create_outreach(self, m): payload = { "targets": [{"given_name": "Joe", "family_name": "Schmoe"}], - "_links": { - "osdi:person": {"href": "https://actionnetwork.org/api/v2/people/123"} - }, + "_links": {"osdi:person": {"href": "https://actionnetwork.org/api/v2/people/123"}}, } id = self.fake_advocacy_campaign["identifiers"][0].split(":")[-1] m.post( @@ -4139,9 +3911,7 @@ def test_get_person(self, m): @requests_mock.Mocker() def test_upsert_person(self, m): m.post(f"{self.api_url}/people", text=json.dumps(self.fake_upsert_person)) - self.assertEqual( - self.an.upsert_person(**self.fake_upsert_person), self.fake_upsert_person - ) + self.assertEqual(self.an.upsert_person(**self.fake_upsert_person), self.fake_upsert_person) @requests_mock.Mocker() def test_update_person(self, m): @@ -4208,9 +3978,7 @@ def test_update_petition(self, m): } m.put( - self.api_url - + "/petitions/" - + 
self.fake_petition["identifiers"][0].split(":")[1], + self.api_url + "/petitions/" + self.fake_petition["identifiers"][0].split(":")[1], text=json.dumps(fake_petition_data), ) response = self.an.update_petition( @@ -4251,9 +4019,7 @@ def test_get_petition_signatures(self, m): ) assert_matching_tables( self.an.get_petition_signatures("123", 1), - self.fake_signatures["_embedded"][ - list(self.fake_signatures["_embedded"])[0] - ], + self.fake_signatures["_embedded"][list(self.fake_signatures["_embedded"])[0]], ) @requests_mock.Mocker() @@ -4264,9 +4030,7 @@ def test_get_person_signatures(self, m): ) assert_matching_tables( self.an.get_person_signatures("123", 1), - self.fake_signatures["_embedded"][ - list(self.fake_signatures["_embedded"])[0] - ], + self.fake_signatures["_embedded"][list(self.fake_signatures["_embedded"])[0]], ) @requests_mock.Mocker() @@ -4297,9 +4061,7 @@ def test_create_signature(self, m): fake_signature_data = { "comments": self.fake_signature["comments"], "_links": { - "osdi:person": { - "href": self.fake_signature["_links"]["osdi:person"]["href"] - } + "osdi:person": {"href": self.fake_signature["_links"]["osdi:person"]["href"]} }, } @@ -4329,9 +4091,7 @@ def test_update_signature(self, m): ) # Call the method to update the signature - updated_signature = self.an.update_signature( - "456", "123", updated_signature_data - ) + updated_signature = self.an.update_signature("456", "123", updated_signature_data) # Assert that the correct data is being sent and the response is handled correctly assert_matching_tables(updated_signature, self.fake_signature) @@ -4345,9 +4105,7 @@ def test_get_form_submissions(self, m): ) assert_matching_tables( self.an.get_form_submissions("123", 1), - self.fake_submissions["_embedded"][ - list(self.fake_submissions["_embedded"])[0] - ], + self.fake_submissions["_embedded"][list(self.fake_submissions["_embedded"])[0]], ) @requests_mock.Mocker() @@ -4358,9 +4116,7 @@ def test_get_person_submissions(self, m): ) assert_matching_tables( self.an.get_person_submissions("123", 1), - self.fake_submissions["_embedded"][ - list(self.fake_submissions["_embedded"])[0] - ], + self.fake_submissions["_embedded"][list(self.fake_submissions["_embedded"])[0]], ) @requests_mock.Mocker() @@ -4404,9 +4160,7 @@ def test_update_submission(self, m): json={"identifiers": ["other-system:230125s"]}, ) assert_matching_tables( - self.an.update_submission( - "123", "123", {"identifiers": ["other-system:230125s"]} - ), + self.an.update_submission("123", "123", {"identifiers": ["other-system:230125s"]}), self.fake_submission, ) @@ -4427,9 +4181,7 @@ def test_get_tags(self, m): @requests_mock.Mocker() def test_get_tag(self, m): - m.get( - f"{self.api_url}/tags/{self.fake_tag_id_1}", text=json.dumps(self.fake_tag) - ) + m.get(f"{self.api_url}/tags/{self.fake_tag_id_1}", text=json.dumps(self.fake_tag)) self.assertEqual(self.an.get_tag(self.fake_tag_id_1), self.fake_tag) # Taggings @@ -4446,9 +4198,7 @@ def test_get_taggings(self, m): @requests_mock.Mocker() def test_get_tagging(self, m): - m.get( - f"{self.api_url}/tags/123/taggings/123", text=json.dumps(self.fake_tagging) - ) + m.get(f"{self.api_url}/tags/123/taggings/123", text=json.dumps(self.fake_tagging)) assert_matching_tables( self.an.get_tagging("123", "123"), self.fake_tagging, diff --git a/test/test_alchemer/test_getresponses.py b/test/test_alchemer/test_getresponses.py index 6e7dfb9b4f..c529681b0d 100644 --- a/test/test_alchemer/test_getresponses.py +++ b/test/test_alchemer/test_getresponses.py @@ -34,9 +34,7 @@ def 
test_get_responses_single_page(self): self.assertEqual(2, actual_responses.num_rows) for i in range(0, 1): - self.assertEqual( - api_return["data"][i]["session_id"], actual_responses[i]["session_id"] - ) + self.assertEqual(api_return["data"][i]["session_id"], actual_responses[i]["session_id"]) def _get_responses_return_single_page(self): return { diff --git a/test/test_auth0.py b/test/test_auth0.py index 3f54bf7979..82ba411cd5 100644 --- a/test/test_auth0.py +++ b/test/test_auth0.py @@ -37,9 +37,7 @@ def test_get_users_by_email(self, m): f"{self.auth0.base_url}/api/v2/users-by-email?email={email}", json=mock_users, ) - assert_matching_tables( - self.auth0.get_users_by_email(email), Table(mock_users), True - ) + assert_matching_tables(self.auth0.get_users_by_email(email), Table(mock_users), True) @requests_mock.Mocker() def test_retrieve_all_users(self, m): diff --git a/test/test_azure/test_azure_blob_storage.py b/test/test_azure/test_azure_blob_storage.py index b9ded2678f..1fe25ba8c3 100644 --- a/test/test_azure/test_azure_blob_storage.py +++ b/test/test_azure/test_azure_blob_storage.py @@ -50,9 +50,7 @@ def test_container_exists(self): def test_get_container(self): # Assert that a ContainerClient object is returned - self.assertIsInstance( - self.azure_blob.get_container(TEST_CONTAINER_NAME), ContainerClient - ) + self.assertIsInstance(self.azure_blob.get_container(TEST_CONTAINER_NAME), ContainerClient) def test_create_container(self): @@ -97,9 +95,7 @@ def test_list_blobs(self): def test_blob_exists(self): # Assert that blob created in setup exists - self.assertTrue( - self.azure_blob.blob_exists(TEST_CONTAINER_NAME, TEST_FILE_NAME) - ) + self.assertTrue(self.azure_blob.blob_exists(TEST_CONTAINER_NAME, TEST_FILE_NAME)) # Assert that invalid blob does not exist self.assertFalse(self.azure_blob.blob_exists(TEST_CONTAINER_NAME, "FAKE_BLOB")) @@ -114,9 +110,7 @@ def test_get_blob(self): def test_get_blob_url(self): # Assert that get_blob_url returns a URL with a shared access signature - blob_url = self.azure_blob.get_blob_url( - TEST_CONTAINER_NAME, TEST_FILE_NAME, permission="r" - ) + blob_url = self.azure_blob.get_blob_url(TEST_CONTAINER_NAME, TEST_FILE_NAME, permission="r") parsed_blob_url = urlparse(blob_url) parsed_blob_query = parse_qs(parsed_blob_url.query) self.assertIn("sas", parsed_blob_query) @@ -137,9 +131,7 @@ def test_put_blob(self): def test_download_blob(self): # Download blob and ensure that it has the expected file contents - download_blob_path = self.azure_blob.download_blob( - TEST_CONTAINER_NAME, TEST_FILE_NAME - ) + download_blob_path = self.azure_blob.download_blob(TEST_CONTAINER_NAME, TEST_FILE_NAME) with open(download_blob_path, "r") as f: self.assertEqual(f.read(), TEST_FILE_CONTENTS) @@ -150,15 +142,11 @@ def test_delete_blob(self): # Upload a blob, assert that it exists tmp_file_path = files.string_to_temp_file(TEST_FILE_CONTENTS, suffix=".txt") self.azure_blob.put_blob(TEST_CONTAINER_NAME, delete_blob_name, tmp_file_path) - self.assertTrue( - self.azure_blob.blob_exists(TEST_CONTAINER_NAME, delete_blob_name) - ) + self.assertTrue(self.azure_blob.blob_exists(TEST_CONTAINER_NAME, delete_blob_name)) # Delete the blob, assert that it no longer exists self.azure_blob.delete_blob(TEST_CONTAINER_NAME, delete_blob_name) - self.assertFalse( - self.azure_blob.blob_exists(TEST_CONTAINER_NAME, delete_blob_name) - ) + self.assertFalse(self.azure_blob.blob_exists(TEST_CONTAINER_NAME, delete_blob_name)) def test_upload_table(self): @@ -170,9 +158,7 @@ def 
test_upload_table(self): test_table, TEST_CONTAINER_NAME, test_table_blob_name, data_type="csv" ) table_blob_client_properties = table_blob_client.get_blob_properties() - self.assertEqual( - table_blob_client_properties.content_settings.content_type, "text/csv" - ) + self.assertEqual(table_blob_client_properties.content_settings.content_type, "text/csv") # Remove blob after assertion self.azure_blob.delete_blob(TEST_CONTAINER_NAME, test_table_blob_name) diff --git a/test/test_bill_com/test_bill_com.py b/test/test_bill_com/test_bill_com.py index 2014788259..f729a26369 100644 --- a/test/test_bill_com/test_bill_com.py +++ b/test/test_bill_com/test_bill_com.py @@ -249,9 +249,7 @@ def test_get_request_response(self, m): @requests_mock.Mocker() def test_get_user_list(self, m): m.post(self.api_url + "List/User.json", text=json.dumps(self.fake_user_list)) - assert_matching_tables( - self.bc.get_user_list(), Table(self.fake_user_list["response_data"]) - ) + assert_matching_tables(self.bc.get_user_list(), Table(self.fake_user_list["response_data"])) @requests_mock.Mocker() def test_get_customer_list(self, m): @@ -265,9 +263,7 @@ def test_get_customer_list(self, m): @requests_mock.Mocker() def test_get_invoice_list(self, m): - m.post( - self.api_url + "List/Invoice.json", text=json.dumps(self.fake_invoice_list) - ) + m.post(self.api_url + "List/Invoice.json", text=json.dumps(self.fake_invoice_list)) assert_matching_tables( self.bc.get_invoice_list(), Table(self.fake_invoice_list["response_data"]) ) @@ -296,9 +292,7 @@ def test_read_invoice(self, m): def test_check_customer(self): self.assertTrue( - self.bc.check_customer( - {"id": "fake_customer_id"}, {"id": "fake_customer_id"} - ) + self.bc.check_customer({"id": "fake_customer_id"}, {"id": "fake_customer_id"}) ) self.assertTrue( self.bc.check_customer( @@ -307,9 +301,7 @@ def test_check_customer(self): ) ) self.assertFalse( - self.bc.check_customer( - {"id": "fake_customer_id1"}, {"id": "fake_customer_id2"} - ) + self.bc.check_customer({"id": "fake_customer_id1"}, {"id": "fake_customer_id2"}) ) self.assertFalse( self.bc.check_customer( @@ -329,9 +321,7 @@ def test_get_or_create_customer(self, m): text=json.dumps(self.fake_customer_read_json), ) self.assertEqual( - self.bc.get_or_create_customer( - "fake_customer_name", self.fake_customer_email - ), + self.bc.get_or_create_customer("fake_customer_name", self.fake_customer_email), self.fake_customer_read_json["response_data"], ) diff --git a/test/test_bloomerang/test_bloomerang.py b/test/test_bloomerang/test_bloomerang.py index c4bb2a343d..19491cfeca 100644 --- a/test/test_bloomerang/test_bloomerang.py +++ b/test/test_bloomerang/test_bloomerang.py @@ -40,15 +40,11 @@ def test_authentication(self, m): # OAuth2 m.post(url=bloomerang.uri_auth, json={"code": "my_auth_code"}) - m.post( - url=bloomerang.uri + "oauth/token", json={"access_token": "my_access_token"} - ) + m.post(url=bloomerang.uri + "oauth/token", json={"access_token": "my_access_token"}) bloomerang = Bloomerang(client_id="my_id", client_secret="my_secret") self.assertEqual(bloomerang.authorization_code, "my_auth_code") self.assertEqual(bloomerang.access_token, "my_access_token") - self.assertEqual( - bloomerang.conn.headers["Authorization"], "Bearer my_access_token" - ) + self.assertEqual(bloomerang.conn.headers["Authorization"], "Bearer my_access_token") def test_base_endpoint(self): url = self.bloomerang._base_endpoint("constituent") @@ -68,9 +64,7 @@ def test_create_constituent(self, m): @requests_mock.Mocker() def 
test_update_constituent(self, m): m.put(f"{self.bloomerang.uri}constituent/{ID}/", json=TEST_CREATE_CONSTITUENT) - self.assertEqual( - self.bloomerang.update_constituent(ID), TEST_CREATE_CONSTITUENT - ) + self.assertEqual(self.bloomerang.update_constituent(ID), TEST_CREATE_CONSTITUENT) @requests_mock.Mocker() def test_get_constituent(self, m): @@ -100,9 +94,7 @@ def test_create_transaction(self, m): @requests_mock.Mocker() def test_update_transaction(self, m): m.put(f"{self.bloomerang.uri}transaction/{ID}/", json=TEST_CREATE_TRANSACTION) - self.assertEqual( - self.bloomerang.update_transaction(ID), TEST_CREATE_TRANSACTION - ) + self.assertEqual(self.bloomerang.update_transaction(ID), TEST_CREATE_TRANSACTION) @requests_mock.Mocker() def test_get_transaction(self, m): @@ -130,9 +122,7 @@ def test_get_transaction_designation(self, m): f"{self.bloomerang.uri}transaction/designation/{ID}/", json=TEST_GET_TRANSACTION, ) - self.assertEqual( - self.bloomerang.get_transaction_designation(ID), TEST_GET_TRANSACTION - ) + self.assertEqual(self.bloomerang.get_transaction_designation(ID), TEST_GET_TRANSACTION) @requests_mock.Mocker() def test_get_transaction_designations(self, m): @@ -153,9 +143,7 @@ def test_create_interaction(self, m): @requests_mock.Mocker() def test_update_interaction(self, m): m.put(f"{self.bloomerang.uri}interaction/{ID}/", json=TEST_CREATE_INTERACTION) - self.assertEqual( - self.bloomerang.update_interaction(ID), TEST_CREATE_INTERACTION - ) + self.assertEqual(self.bloomerang.update_interaction(ID), TEST_CREATE_INTERACTION) @requests_mock.Mocker() def test_get_interaction(self, m): diff --git a/test/test_bloomerang/test_data.py b/test/test_bloomerang/test_data.py index 93270a7c6f..81452dfd95 100644 --- a/test/test_bloomerang/test_data.py +++ b/test/test_bloomerang/test_data.py @@ -436,9 +436,7 @@ }, "Type": "Pledge", "PledgePaymentIds": [0], - "PledgeInstallments": [ - {"Id": 0, "PledgeId": 0, "Date": "2020-09-08", "Amount": 0} - ], + "PledgeInstallments": [{"Id": 0, "PledgeId": 0, "Date": "2020-09-08", "Amount": 0}], "PledgeBalance": 0, "PledgeStatus": "InGoodStanding", "PledgeAmountInArrears": 0, diff --git a/test/test_bluelink/test_bluelink.py b/test/test_bluelink/test_bluelink.py index 2b80cf005a..5c15ea5f70 100644 --- a/test/test_bluelink/test_bluelink.py +++ b/test/test_bluelink/test_bluelink.py @@ -99,9 +99,7 @@ def test_table_to_people(self): # expected: person1 = BluelinkPerson( identifiers=[ - BluelinkIdentifier( - source="FAKESOURCE", identifier="bart@springfield.net" - ) + BluelinkIdentifier(source="FAKESOURCE", identifier="bart@springfield.net") ], emails=[BluelinkEmail(address="bart@springfield.net", primary=True)], family_name="Simpson", @@ -109,9 +107,7 @@ def test_table_to_people(self): ) person2 = BluelinkPerson( identifiers=[ - BluelinkIdentifier( - source="FAKESOURCE", identifier="homer@springfield.net" - ) + BluelinkIdentifier(source="FAKESOURCE", identifier="homer@springfield.net") ], emails=[BluelinkEmail(address="homer@springfield.net", primary=True)], family_name="Simpson", diff --git a/test/test_box/test_box_storage.py b/test/test_box/test_box_storage.py index 25113dfc2e..c768137a36 100644 --- a/test/test_box/test_box_storage.py +++ b/test/test_box/test_box_storage.py @@ -30,9 +30,7 @@ def generate_random_string(length): @unittest.skipIf(not os.getenv("LIVE_TEST"), "Skipping because not running live test") class TestBoxStorage(unittest.TestCase): def setUp(self) -> None: - warnings.filterwarnings( - action="ignore", message="unclosed", 
category=ResourceWarning - ) + warnings.filterwarnings(action="ignore", message="unclosed", category=ResourceWarning) # Create a client that we'll use to manipulate things behind the scenes self.client = Box() @@ -93,8 +91,7 @@ def test_list_files_by_path(self) -> None: break self.assertTrue( found_default, - f"Failed to find test folder f{self.temp_folder_name} " - f"in default Box folder", + f"Failed to find test folder f{self.temp_folder_name} " f"in default Box folder", ) subfolder_name = "path_subfolder" @@ -171,9 +168,7 @@ def test_upload_file(self) -> None: # Check that we throw an exception with bad formats with self.assertRaises(ValueError): - box.upload_table_to_folder_id( - table, "phone_numbers", format="illegal_format" - ) + box.upload_table_to_folder_id(table, "phone_numbers", format="illegal_format") with self.assertRaises(ValueError): box.get_table_by_file_id(box_file.id, format="illegal_format") @@ -239,9 +234,7 @@ def test_get_item_id(self) -> None: # Nonexistent file with self.assertRaises(ValueError): - file_path = ( - f"{self.temp_folder_name}/item_subfolder/nonexistent/phone_numbers" - ) + file_path = f"{self.temp_folder_name}/item_subfolder/nonexistent/phone_numbers" box.get_item_id(path=file_path) # File (rather than folder) in middle of path @@ -287,9 +280,7 @@ def test_errors(self) -> None: # Create folder in non-existent parent with self.assertLogs(level=logging.WARNING): with self.assertRaises(BoxAPIException): - box.create_folder_by_id( - folder_name="subfolder", parent_folder_id=nonexistent_id - ) + box.create_folder_by_id(folder_name="subfolder", parent_folder_id=nonexistent_id) # Try using bad credentials box = Box(access_token="5345345345") diff --git a/test/test_braintree/test_braintree.py b/test/test_braintree/test_braintree.py index 8cdddeca9a..8696dfa610 100644 --- a/test/test_braintree/test_braintree.py +++ b/test/test_braintree/test_braintree.py @@ -25,9 +25,7 @@ def test_dispute_search(self, m): "/merchants/abcd1234abcd1234/disputes/advanced_search?page=1", text=open(f"{_dir}/test_data/dispute_example.xml").read(), ) - table = self.braintree.get_disputes( - start_date="2020-01-01", end_date="2020-01-02" - ) + table = self.braintree.get_disputes(start_date="2020-01-01", end_date="2020-01-02") self.assertEqual(len(table.table), 3) self.assertEqual(table[0]["id"], "abcd1234abcd1234") @@ -72,9 +70,7 @@ def test_transaction_search(self, m): self.assertEqual(len(table[0].keys()), 1) self.assertEqual(len(full_table[0].keys()), 67) - self.assertEqual( - full_table[0]["disbursement_date"], datetime.date(2019, 12, 30) - ) + self.assertEqual(full_table[0]["disbursement_date"], datetime.date(2019, 12, 30)) self.assertEqual(full_table[0]["credit_card_bin"], "789234") self.assertEqual(full_table[0]["disbursement_success"], True) self.assertEqual(full_table[0]["amount"], decimal.Decimal("150.00")) @@ -113,12 +109,8 @@ def test_subscription_search(self, m): self.assertEqual(len(table[0].keys()), 1) self.assertEqual(len(full_table[0].keys()), 33) - self.assertEqual( - full_table[0]["first_billing_date"], datetime.date(2022, 8, 22) - ) - self.assertEqual( - full_table[0]["transactions"][0].credit_card_details.bin, "999" - ) + self.assertEqual(full_table[0]["first_billing_date"], datetime.date(2022, 8, 22)) + self.assertEqual(full_table[0]["transactions"][0].credit_card_details.bin, "999") self.assertEqual(full_table[0]["never_expires"], True) self.assertEqual(full_table[0]["price"], decimal.Decimal("10.00")) @@ -128,9 +120,7 @@ def test_query_generation(self): 
**{"disbursement_date": {"between": ["2020-01-01", "2020-01-01"]}}, ) self.assertEqual(query[0].name, "disbursement_date") - self.assertEqual( - query[0].to_param(), {"min": "2020-01-01", "max": "2020-01-01"} - ) + self.assertEqual(query[0].to_param(), {"min": "2020-01-01", "max": "2020-01-01"}) query = self.braintree._get_query_objects( "transaction", **{"merchant_account_id": {"in_list": ["abc123"]}} @@ -148,6 +138,4 @@ def test_query_generation(self): ) self.assertEqual(query[0].name, "merchant_account_id") self.assertEqual(query[1].name, "effective_date") - self.assertEqual( - query[1].to_param(), {"min": "2020-01-01", "max": "2020-01-01"} - ) + self.assertEqual(query[1].to_param(), {"min": "2020-01-01", "max": "2020-01-01"}) diff --git a/test/test_capitol_canary.py b/test/test_capitol_canary.py index c0916b82f3..7fc64163cf 100644 --- a/test/test_capitol_canary.py +++ b/test/test_capitol_canary.py @@ -59,9 +59,7 @@ }, ], "fields": [], - "phones": [ - {"id": 10537860, "address": "+19995206447", "subscribed": "false"} - ], + "phones": [{"id": 10537860, "address": "+19995206447", "subscribed": "false"}], "emails": [ {"id": 10537871, "address": "N@k.com", "subscribed": "false"}, {"id": 10950446, "address": "email@me.com", "subscribed": "false"}, @@ -198,9 +196,7 @@ def test_get_advocates(self, m): "memberships_name", "memberships_source", ] - self.assertTrue( - validate_list(member_exp, self.cc.get_advocates()["memberships"]) - ) + self.assertTrue(validate_list(member_exp, self.cc.get_advocates()["memberships"])) fields_exp = ["advocate_id", "fields"] self.assertTrue(validate_list(fields_exp, self.cc.get_advocates()["fields"])) @@ -269,24 +265,18 @@ def test_create_advocate(self, m): # Test arg validation - create requires a phone or an email self.assertRaises( ValueError, - lambda: self.cc.create_advocate( - campaigns=[1], firstname="Foo", lastname="bar" - ), + lambda: self.cc.create_advocate(campaigns=[1], firstname="Foo", lastname="bar"), ) # Test arg validation - sms opt in requires a phone self.assertRaises( ValueError, - lambda: self.cc.create_advocate( - campaigns=[1], email="foo@bar.com", sms_optin=True - ), + lambda: self.cc.create_advocate(campaigns=[1], email="foo@bar.com", sms_optin=True), ) # Test arg validation - email opt in requires a email self.assertRaises( ValueError, - lambda: self.cc.create_advocate( - campaigns=[1], phone="1234567890", email_optin=True - ), + lambda: self.cc.create_advocate(campaigns=[1], phone="1234567890", email_optin=True), ) # Test a successful call diff --git a/test/test_catalist/test_catalist.py b/test/test_catalist/test_catalist.py index 834b510f92..b6b2a075dc 100644 --- a/test/test_catalist/test_catalist.py +++ b/test/test_catalist/test_catalist.py @@ -94,9 +94,7 @@ def test_upload(self, mock_requests) -> None: assert requested_base_url == "api.catalist.us" assert set(requested_queries.keys()) == set(["token"]) assert requested_queries["token"] == ["tokenexample"] - assert requested_endpoint.startswith( - "/mapi/upload/template/48827/action/publish/url/" - ) + assert requested_endpoint.startswith("/mapi/upload/template/48827/action/publish/url/") def test_upload_with_options(self, mock_requests) -> None: """Mock use of upload() method with options, check API calls.""" diff --git a/test/test_census/test_census.py b/test/test_census/test_census.py index c219702dd1..795234176e 100644 --- a/test/test_census/test_census.py +++ b/test/test_census/test_census.py @@ -29,8 +29,6 @@ def test_get_census_mock_test(self, m): variables = "NAME,B01001_001E" 
location = "for=us:1" test_json = {"NAME": "United States", "B01001_001E": "328239523", "us": "1"} - table = m.census.get_census( - year, dataset_acronym, variables, location, json=test_json - ) + table = m.census.get_census(year, dataset_acronym, variables, location, json=test_json) self.assertEqual(table[0]["B01001_001E"], "328239523") self.assertEqual(table[0]["NAME"], "United States") diff --git a/test/test_civis.py b/test/test_civis.py index 1eed0e7340..7f960b09d6 100644 --- a/test/test_civis.py +++ b/test/test_civis.py @@ -5,9 +5,7 @@ # from . import scratch_creds -@unittest.skipIf( - not os.environ.get("LIVE_TEST"), "Skipping because not running live test" -) +@unittest.skipIf(not os.environ.get("LIVE_TEST"), "Skipping because not running live test") class TestCivisClient(unittest.TestCase): def setUp(self): diff --git a/test/test_controlshift/test_controlshift.py b/test/test_controlshift/test_controlshift.py index 47cffd9c69..b0484c9275 100644 --- a/test/test_controlshift/test_controlshift.py +++ b/test/test_controlshift/test_controlshift.py @@ -20,9 +20,7 @@ def setUp(self): @requests_mock.Mocker() def test_get_petitions(self, m): m.post(f"{self.hostname}/oauth/token", json={"access_token": "123"}) - cs = Controlshift( - hostname=self.hostname, client_id="1234", client_secret="1234" - ) + cs = Controlshift(hostname=self.hostname, client_id="1234", client_secret="1234") m.get(f"{self.hostname}/api/v1/petitions", json=test_data.petition_test_data) tbl = cs.get_petitions() diff --git a/test/test_copper/test_copper.py b/test/test_copper/test_copper.py index 6c34d64b8a..b1d2e647b1 100644 --- a/test/test_copper/test_copper.py +++ b/test/test_copper/test_copper.py @@ -266,14 +266,10 @@ def test_paginate_request(self, m): {"number": "(541) 555-9585", "category": "work"}, {"number": "555-555-9585", "category": "work"}, ], - "socials": [ - {"url": "https://gravatar.com/gravatar", "category": "gravatar"} - ], + "socials": [{"url": "https://gravatar.com/gravatar", "category": "gravatar"}], "tags": [], "title": None, - "websites": [ - {"url": "http://www.IndivisibleCityA.org", "category": None} - ], + "websites": [{"url": "http://www.IndivisibleCityA.org", "category": None}], "custom_fields": [ {"custom_field_definition_id": 125880, "value": None}, {"custom_field_definition_id": 107297, "value": None}, @@ -393,9 +389,7 @@ def test_paginate_request(self, m): # self.assertTrue( assert_matching_tables( Table(self.blob), - Table( - self.cp.paginate_request("/people/search", page_size=1, req_type="POST") - ), + Table(self.cp.paginate_request("/people/search", page_size=1, req_type="POST")), ) def test_process_json(self): @@ -525,9 +519,7 @@ def test_process_custom_fields(self): fake_response = json.load(json_file) fake_processed = self.cp.process_custom_fields(fake_response) - self.assertTrue( - [f["name"] for f in fake_processed] == self.custom_field_table_names - ) + self.assertTrue([f["name"] for f in fake_processed] == self.custom_field_table_names) for tbl in self.custom_field_table_names: assert_matching_tables( [f["tbl"] for f in fake_processed if f["name"] == tbl][0], @@ -567,9 +559,7 @@ def test_get_standard_object(self, m): # So the following line is the only difference from test_get_people() processed_blob = self.cp.get_standard_object("people") blob_people = [f for f in processed_blob if f["name"] == "people"][0]["tbl"] - blob_people_emails = [ - f for f in processed_blob if f["name"] == "people_emails" - ][0]["tbl"] + blob_people_emails = [f for f in processed_blob if f["name"] == 
"people_emails"][0]["tbl"] assert_matching_tables(self.processed_people, blob_people) assert_matching_tables(processed_people_emails, blob_people_emails) @@ -604,9 +594,7 @@ def test_get_people(self, m): ) processed_blob = self.cp.get_people() blob_people = [f for f in processed_blob if f["name"] == "people"][0]["tbl"] - blob_people_emails = [ - f for f in processed_blob if f["name"] == "people_emails" - ][0]["tbl"] + blob_people_emails = [f for f in processed_blob if f["name"] == "people_emails"][0]["tbl"] # Actually testing get_standard_object() and process_json() # Dicts & simple lists are unpacked to columns on original table @@ -754,12 +742,8 @@ def test_get_opportunities(self, m): ) processed_blob = self.cp.get_opportunities() - blob_opps = [f for f in processed_blob if f["name"] == "opportunities"][0][ - "tbl" - ] - blob_opps_cf = [ - f for f in processed_blob if f["name"] == "opportunities_custom_fields" - ] + blob_opps = [f for f in processed_blob if f["name"] == "opportunities"][0]["tbl"] + blob_opps_cf = [f for f in processed_blob if f["name"] == "opportunities_custom_fields"] blob_opps_cf = blob_opps_cf[0]["tbl"] assert_matching_tables(processed_opps, blob_opps) @@ -902,12 +886,8 @@ def test_get_opportunities2(self, m): ) processed_blob = self.cp.get_opportunities() - blob_opps = [f for f in processed_blob if f["name"] == "opportunities"][0][ - "tbl" - ] - blob_opps_cf = [ - f for f in processed_blob if f["name"] == "opportunities_custom_fields" - ] + blob_opps = [f for f in processed_blob if f["name"] == "opportunities"][0]["tbl"] + blob_opps_cf = [f for f in processed_blob if f["name"] == "opportunities_custom_fields"] blob_opps_cf = blob_opps_cf[0]["tbl"] assert_matching_tables(processed_opps, blob_opps) @@ -1023,9 +1003,7 @@ def test_get_companies(self, m): ) processed_blob = self.cp.get_companies() - blob_companies = [f for f in processed_blob if f["name"] == "companies"][0][ - "tbl" - ] + blob_companies = [f for f in processed_blob if f["name"] == "companies"][0]["tbl"] blob_companies_phones = [ f for f in processed_blob if f["name"] == "companies_phone_numbers" ][0]["tbl"] @@ -1105,9 +1083,7 @@ def test_get_custom_fields(self, m): ) processed_blob = self.cp.get_custom_fields() - self.assertTrue( - [f["name"] for f in processed_blob] == self.custom_field_table_names - ) + self.assertTrue([f["name"] for f in processed_blob] == self.custom_field_table_names) for tbl in self.custom_field_table_names: assert_matching_tables( [f["tbl"] for f in processed_blob if f["name"] == tbl][0], diff --git a/test/test_credential_tools.py b/test/test_credential_tools.py index 6c59b46a68..9f44d9b76a 100644 --- a/test/test_credential_tools.py +++ b/test/test_credential_tools.py @@ -91,10 +91,7 @@ def test_encode_from_json_file(self): def testencode_from_env(self): lst = ["TES_VAR1", "TES_VAR2"] - expected = ( - "PRSNSENVeyJURVNfVkFSMSI6ICJ2YXJpYWJsZTEiLCAiVEVTX1ZBU" - "jIiOiAidmFyaWFibGUyIn0=" - ) + expected = "PRSNSENVeyJURVNfVkFSMSI6ICJ2YXJpYWJsZTEiLCAiVEVTX1ZBU" "jIiOiAidmFyaWFibGUyIn0=" self.assertEqual(ct.encode_from_env(lst), expected) diff --git a/test/test_crowdtangle/test_crowdtangle.py b/test/test_crowdtangle/test_crowdtangle.py index da12ebba81..62454d4f40 100644 --- a/test/test_crowdtangle/test_crowdtangle.py +++ b/test/test_crowdtangle/test_crowdtangle.py @@ -27,9 +27,7 @@ def test_get_leaderboard(self, m): m.get(self.ct.uri + "/leaderboard", json=expected_leaderboard) leaderboard = self.ct.get_leaderboard() - exp_tbl = self.ct._unpack( - 
Table(expected_leaderboard["result"]["accountStatistics"]) - ) + exp_tbl = self.ct._unpack(Table(expected_leaderboard["result"]["accountStatistics"])) assert_matching_tables(leaderboard, exp_tbl) @requests_mock.Mocker() diff --git a/test/test_databases/test_bigquery.py b/test/test_databases/test_bigquery.py index 353ea4115f..080bc709a9 100644 --- a/test/test_databases/test_bigquery.py +++ b/test/test_databases/test_bigquery.py @@ -24,9 +24,7 @@ class FakeGoogleCloudStorage(GoogleCloudStorage): def __init__(self): super().__init__(None, None) - def upload_table( - self, table, bucket_name, blob_name, data_type="csv", default_acl=None - ): + def upload_table(self, table, bucket_name, blob_name, data_type="csv", default_acl=None): pass def delete_blob(self, bucket_name, blob_name): @@ -108,12 +106,7 @@ def test_query_with_transaction(self, create_temp_file_mock): # Check that queries and transaction keywords are included in sql self.assertTrue( - all( - [ - text in keyword_args["sql"] - for text in queries + ["BEGIN TRANSACTION", "COMMIT"] - ] - ) + all([text in keyword_args["sql"] for text in queries + ["BEGIN TRANSACTION", "COMMIT"]]) ) self.assertEqual(keyword_args["parameters"], parameters) self.assertFalse(keyword_args["return_values"]) @@ -137,9 +130,7 @@ def test_copy_gcs(self): self.assertEqual(load_call_args[1]["source_uris"], tmp_blob_uri) job_config = load_call_args[1]["job_config"] - self.assertEqual( - job_config.write_disposition, bigquery.WriteDisposition.WRITE_EMPTY - ) + self.assertEqual(job_config.write_disposition, bigquery.WriteDisposition.WRITE_EMPTY) def test_copy_gcs__if_exists_truncate(self): # setup dependencies / inputs @@ -161,9 +152,7 @@ def test_copy_gcs__if_exists_truncate(self): self.assertEqual(load_call_args[1]["source_uris"], tmp_blob_uri) job_config = load_call_args[1]["job_config"] - self.assertEqual( - job_config.write_disposition, bigquery.WriteDisposition.WRITE_TRUNCATE - ) + self.assertEqual(job_config.write_disposition, bigquery.WriteDisposition.WRITE_TRUNCATE) def test_copy_gcs__if_exists_append(self): # setup dependencies / inputs @@ -185,9 +174,7 @@ def test_copy_gcs__if_exists_append(self): self.assertEqual(load_call_args[1]["source_uris"], tmp_blob_uri) job_config = load_call_args[1]["job_config"] - self.assertEqual( - job_config.write_disposition, bigquery.WriteDisposition.WRITE_APPEND - ) + self.assertEqual(job_config.write_disposition, bigquery.WriteDisposition.WRITE_APPEND) def test_copy_gcs__if_exists_fail(self): # setup dependencies / inputs @@ -251,12 +238,8 @@ def test_copy_gcs__bad_if_exists(self): ) @mock.patch("google.cloud.storage.Client") - @mock.patch.object( - GoogleCloudStorage, "split_uri", return_value=("tmp", "file.gzip") - ) - @mock.patch.object( - GoogleCloudStorage, "unzip_blob", return_value="gs://tmp/file.csv" - ) + @mock.patch.object(GoogleCloudStorage, "split_uri", return_value=("tmp", "file.gzip")) + @mock.patch.object(GoogleCloudStorage, "unzip_blob", return_value="gs://tmp/file.csv") def test_copy_large_compressed_file_from_gcs( self, unzip_mock: mock.MagicMock, split_mock: mock.MagicMock, *_ ): @@ -290,9 +273,7 @@ def test_copy_large_compressed_file_from_gcs( self.assertEqual(load_call_args[1]["source_uris"], "gs://tmp/file.csv") job_config = load_call_args[1]["job_config"] - self.assertEqual( - job_config.write_disposition, bigquery.WriteDisposition.WRITE_EMPTY - ) + self.assertEqual(job_config.write_disposition, bigquery.WriteDisposition.WRITE_EMPTY) def test_copy_s3(self): # setup dependencies / inputs diff --git 
a/test/test_databases/test_dbsync.py b/test/test_databases/test_dbsync.py index bcfc1c0781..7955399eaf 100644 --- a/test/test_databases/test_dbsync.py +++ b/test/test_databases/test_dbsync.py @@ -11,9 +11,7 @@ # These tests interact directly with the Postgres database. In order to run, set the # env to LIVE_TEST='TRUE'. -@unittest.skipIf( - not os.environ.get("LIVE_TEST"), "Skipping because not running live test" -) +@unittest.skipIf(not os.environ.get("LIVE_TEST"), "Skipping because not running live test") class TestPostgresDBSync(unittest.TestCase): def setUp(self): @@ -143,9 +141,7 @@ def test_table_sync_incremental_empty_table(self): # These tests interact directly with the Postgres database. In order to run, set the # env to LIVE_TEST='TRUE'. -@unittest.skipIf( - not os.environ.get("LIVE_TEST"), "Skipping because not running live test" -) +@unittest.skipIf(not os.environ.get("LIVE_TEST"), "Skipping because not running live test") class TestRedshiftDBSync(TestPostgresDBSync): """This test inherits all of the tests from the Postgres test.""" @@ -261,9 +257,7 @@ def test_table_sync_full_without_retry(self): self.fake_destination.setup_table("destination", Table(), failures=1) # Make sure the sync results in an exception - self.assertRaises( - ValueError, lambda: dbsync.table_sync_full("source", "destination") - ) + self.assertRaises(ValueError, lambda: dbsync.table_sync_full("source", "destination")) def test_table_sync_full_order_by(self): dbsync = DBSync(self.fake_source, self.fake_destination) diff --git a/test/test_databases/test_discover_database.py b/test/test_databases/test_discover_database.py index 4f8fbf647e..51b4771bf2 100644 --- a/test/test_databases/test_discover_database.py +++ b/test/test_databases/test_discover_database.py @@ -24,9 +24,7 @@ def test_no_database_detected(self, mock_getenv, *_): @patch.object(Redshift, "__init__", return_value=None) @patch("os.getenv") def test_single_database_detected(self, mock_getenv, *_): - mock_getenv.side_effect = ( - lambda var: "password" if var == "REDSHIFT_PASSWORD" else None - ) + mock_getenv.side_effect = lambda var: "password" if var == "REDSHIFT_PASSWORD" else None self.assertIsInstance(discover_database(), Redshift) @patch.object(BigQuery, "__init__", return_value=None) @@ -35,9 +33,7 @@ def test_single_database_detected(self, mock_getenv, *_): @patch.object(Redshift, "__init__", return_value=None) @patch("os.getenv") def test_single_database_detected_with_other_default(self, mock_getenv, *_): - mock_getenv.side_effect = ( - lambda var: "password" if var == "REDSHIFT_PASSWORD" else None - ) + mock_getenv.side_effect = lambda var: "password" if var == "REDSHIFT_PASSWORD" else None self.assertIsInstance(discover_database(default_connector=Postgres), Redshift) @patch.object(BigQuery, "__init__", return_value=None) @@ -46,12 +42,8 @@ def test_single_database_detected_with_other_default(self, mock_getenv, *_): @patch.object(Redshift, "__init__", return_value=None) @patch("os.getenv") def test_single_database_detected_with_other_default_list(self, mock_getenv, *_): - mock_getenv.side_effect = ( - lambda var: "password" if var == "REDSHIFT_PASSWORD" else None - ) - self.assertIsInstance( - discover_database(default_connector=[Postgres, MySQL]), Redshift - ) + mock_getenv.side_effect = lambda var: "password" if var == "REDSHIFT_PASSWORD" else None + self.assertIsInstance(discover_database(default_connector=[Postgres, MySQL]), Redshift) @patch.object(BigQuery, "__init__", return_value=None) @patch.object(Postgres, "__init__", 
return_value=None) @@ -79,9 +71,7 @@ def test_multiple_databases_with_default(self, mock_getenv, *_): @patch("os.getenv") def test_multiple_databases_with_default_list(self, mock_getenv, *_): mock_getenv.return_value = "password" - self.assertIsInstance( - discover_database(default_connector=[MySQL, Redshift]), MySQL - ) + self.assertIsInstance(discover_database(default_connector=[MySQL, Redshift]), MySQL) @patch.object(BigQuery, "__init__", return_value=None) @patch.object(Postgres, "__init__", return_value=None) @@ -89,10 +79,8 @@ def test_multiple_databases_with_default_list(self, mock_getenv, *_): @patch.object(Redshift, "__init__", return_value=None) @patch("os.getenv") def test_multiple_databases_invalid_default(self, mock_getenv, *_): - mock_getenv.side_effect = ( - lambda var: "password" - if var == "REDSHIFT_PASSWORD" or var == "MYSQL_PASSWORD" - else None + mock_getenv.side_effect = lambda var: ( + "password" if var == "REDSHIFT_PASSWORD" or var == "MYSQL_PASSWORD" else None ) with self.assertRaises(EnvironmentError): discover_database(default_connector=Postgres) @@ -103,10 +91,8 @@ def test_multiple_databases_invalid_default(self, mock_getenv, *_): @patch.object(Redshift, "__init__", return_value=None) @patch("os.getenv") def test_multiple_databases_invalid_default_list(self, mock_getenv, *_): - mock_getenv.side_effect = ( - lambda var: "password" - if var == "REDSHIFT_PASSWORD" or var == "MYSQL_PASSWORD" - else None + mock_getenv.side_effect = lambda var: ( + "password" if var == "REDSHIFT_PASSWORD" or var == "MYSQL_PASSWORD" else None ) with self.assertRaises(EnvironmentError): discover_database(default_connector=[Postgres, BigQuery]) diff --git a/test/test_databases/test_mysql.py b/test/test_databases/test_mysql.py index 323b4ffbf6..9507cdd0ce 100644 --- a/test/test_databases/test_mysql.py +++ b/test/test_databases/test_mysql.py @@ -6,9 +6,7 @@ # These tests interact directly with the MySQL database. To run, set env variable "LIVE_TEST=True" -@unittest.skipIf( - not os.environ.get("LIVE_TEST"), "Skipping because not running live test" -) +@unittest.skipIf(not os.environ.get("LIVE_TEST"), "Skipping because not running live test") class TestMySQLLive(unittest.TestCase): def setUp(self): @@ -47,9 +45,7 @@ def test_insert_data(self): # These tests interact directly with the MySQL database. 
To run, set env variable "LIVE_TEST=True" -@unittest.skipIf( - not os.environ.get("LIVE_TEST"), "Skipping because not running live test" -) +@unittest.skipIf(not os.environ.get("LIVE_TEST"), "Skipping because not running live test") class TestMySQL(unittest.TestCase): def setUp(self): @@ -140,9 +136,7 @@ def test_get_new_rows_count(self): class TestMySQL(unittest.TestCase): # noqa def setUp(self): - self.mysql = MySQL( - username="test", password="test", host="test", db="test", port=123 - ) + self.mysql = MySQL(username="test", password="test", host="test", db="test", port=123) self.tbl = Table( [ diff --git a/test/test_databases/test_postgres.py b/test/test_databases/test_postgres.py index 5279c94ccf..8d232e487c 100644 --- a/test/test_databases/test_postgres.py +++ b/test/test_databases/test_postgres.py @@ -12,9 +12,7 @@ class TestPostgresCreateStatement(unittest.TestCase): def setUp(self): - self.pg = Postgres( - username="test", password="test", host="test", db="test", port=123 - ) + self.pg = Postgres(username="test", password="test", host="test", db="test", port=123) self.tbl = Table([["ID", "Name"], [1, "Jim"], [2, "John"], [3, "Sarah"]]) @@ -157,9 +155,7 @@ def test_create_statement(self): # These tests interact directly with the Postgres database -@unittest.skipIf( - not os.environ.get("LIVE_TEST"), "Skipping because not running live test" -) +@unittest.skipIf(not os.environ.get("LIVE_TEST"), "Skipping because not running live test") class TestPostgresDB(unittest.TestCase): def setUp(self): @@ -215,9 +211,7 @@ def test_copy(self): # Copy a table and ensure table exists self.pg.copy(self.tbl, f"{self.temp_schema}.test_copy", if_exists="drop") - r = self.pg.query( - f"select * from {self.temp_schema}.test_copy where name='Jim'" - ) + r = self.pg.query(f"select * from {self.temp_schema}.test_copy where name='Jim'") self.assertEqual(r[0]["id"], 1) # Copy table and ensure truncate works. @@ -236,9 +230,7 @@ def test_copy(self): self.assertEqual(tbl.first, 6) # Try to copy the table and ensure that default fail works. - self.assertRaises( - ValueError, self.pg.copy, self.tbl, f"{self.temp_schema}.test_copy" - ) + self.assertRaises(ValueError, self.pg.copy, self.tbl, f"{self.temp_schema}.test_copy") # Try to copy the table and ensure that explicit fail works. 
self.assertRaises( @@ -252,9 +244,7 @@ def test_copy(self): def test_to_postgres(self): self.tbl.to_postgres(f"{self.temp_schema}.test_copy") - r = self.pg.query( - f"select * from {self.temp_schema}.test_copy where name='Jim'" - ) + r = self.pg.query(f"select * from {self.temp_schema}.test_copy where name='Jim'") self.assertEqual(r[0]["id"], 1) def test_from_postgres(self): diff --git a/test/test_databases/test_redshift.py b/test/test_databases/test_redshift.py index b811e98857..7288912430 100644 --- a/test/test_databases/test_redshift.py +++ b/test/test_databases/test_redshift.py @@ -15,9 +15,7 @@ class TestRedshift(unittest.TestCase): def setUp(self): - self.rs = Redshift( - username="test", password="test", host="test", db="test", port=123 - ) + self.rs = Redshift(username="test", password="test", host="test", db="test", port=123) self.tbl = Table([["ID", "Name"], [1, "Jim"], [2, "John"], [3, "Sarah"]]) @@ -51,9 +49,7 @@ def test_split_full_table_name(self): self.assertRaises(ValueError, Redshift.split_full_table_name, "a.b.c") def test_combine_schema_and_table_name(self): - full_table_name = Redshift.combine_schema_and_table_name( - "some_schema", "some_table" - ) + full_table_name = Redshift.combine_schema_and_table_name("some_schema", "some_table") self.assertEqual(full_table_name, "some_schema.some_table") def test_data_type(self): @@ -118,17 +114,13 @@ def test_create_sql(self): # Test the the statement is expected sql = self.rs.create_sql("tmc.test", self.mapping, distkey="ID") - exp_sql = ( - "create table tmc.test (\n id int,\n name varchar(5)) \ndistkey(ID) ;" - ) + exp_sql = "create table tmc.test (\n id int,\n name varchar(5)) \ndistkey(ID) ;" self.assertEqual(sql, exp_sql) def test_compound_sortkey(self): # check single sortkey formatting sql = self.rs.create_sql("tmc.test", self.mapping, sortkey="ID") - exp_sql = ( - "create table tmc.test (\n id int,\n name varchar(5)) \nsortkey(ID);" - ) + exp_sql = "create table tmc.test (\n id int,\n name varchar(5)) \nsortkey(ID);" self.assertEqual(sql, exp_sql) # check compound sortkey formatting @@ -207,9 +199,7 @@ def test_copy_statement_default(self): ) # Scrub the keys - sql = re.sub( - r"id=.+;", "*id=HIDDEN*;", re.sub(r"key=.+'", "key=*HIDDEN*'", sql) - ) + sql = re.sub(r"id=.+;", "*id=HIDDEN*;", re.sub(r"key=.+'", "key=*HIDDEN*'", sql)) expected_options = [ "ignoreheader 1", @@ -240,9 +230,7 @@ def test_copy_statement_statupdate(self): ) # Scrub the keys - sql = re.sub( - r"id=.+;", "*id=HIDDEN*;", re.sub(r"key=.+'", "key=*HIDDEN*'", sql) - ) + sql = re.sub(r"id=.+;", "*id=HIDDEN*;", re.sub(r"key=.+'", "key=*HIDDEN*'", sql)) expected_options = [ "statupdate on", @@ -271,9 +259,7 @@ def test_copy_statement_statupdate(self): ) # Scrub the keys - sql2 = re.sub( - r"id=.+;", "*id=HIDDEN*;", re.sub(r"key=.+'", "key=*HIDDEN*'", sql2) - ) + sql2 = re.sub(r"id=.+;", "*id=HIDDEN*;", re.sub(r"key=.+'", "key=*HIDDEN*'", sql2)) expected_options = [ "statupdate off", @@ -304,9 +290,7 @@ def test_copy_statement_compupdate(self): ) # Scrub the keys - sql = re.sub( - r"id=.+;", "*id=HIDDEN*;", re.sub(r"key=.+'", "key=*HIDDEN*'", sql) - ) + sql = re.sub(r"id=.+;", "*id=HIDDEN*;", re.sub(r"key=.+'", "key=*HIDDEN*'", sql)) expected_options = [ "compupdate on", @@ -335,9 +319,7 @@ def test_copy_statement_compupdate(self): ) # Scrub the keys - sql2 = re.sub( - r"id=.+;", "*id=HIDDEN*;", re.sub(r"key=.+'", "key=*HIDDEN*'", sql2) - ) + sql2 = re.sub(r"id=.+;", "*id=HIDDEN*;", re.sub(r"key=.+'", "key=*HIDDEN*'", sql2)) expected_options = [ 
"compupdate off", @@ -370,9 +352,7 @@ def test_copy_statement_columns(self): ) # Scrub the keys - sql = re.sub( - r"id=.+;", "*id=HIDDEN*;", re.sub(r"key=.+'", "key=*HIDDEN*'", sql) - ) + sql = re.sub(r"id=.+;", "*id=HIDDEN*;", re.sub(r"key=.+'", "key=*HIDDEN*'", sql)) expected_options = [ "ignoreheader 1", @@ -394,9 +374,7 @@ def test_copy_statement_columns(self): # These tests interact directly with the Redshift database -@unittest.skipIf( - not os.environ.get("LIVE_TEST"), "Skipping because not running live test" -) +@unittest.skipIf(not os.environ.get("LIVE_TEST"), "Skipping because not running live test") class TestRedshiftDB(unittest.TestCase): def setUp(self): @@ -482,9 +460,7 @@ def test_table_exists(self): self.assertFalse(self.rs.table_exists(f"{self.temp_schema}.test_view_fake")) # Check that the view kwarg works - self.assertFalse( - self.rs.table_exists(f"{self.temp_schema}.test_view", view=False) - ) + self.assertFalse(self.rs.table_exists(f"{self.temp_schema}.test_view", view=False)) def test_temp_s3_create(self): @@ -507,9 +483,7 @@ def test_copy(self): self.rs.copy(self.tbl, f"{self.temp_schema}.test_copy", if_exists="drop") # Test that file exists - r = self.rs.query( - f"select * from {self.temp_schema}.test_copy where name='Jim'" - ) + r = self.rs.query(f"select * from {self.temp_schema}.test_copy where name='Jim'") self.assertEqual(r[0]["id"], 1) # Copy to the same table, to verify that the "truncate" flag works. @@ -528,9 +502,7 @@ def test_copy(self): if_exists="drop", sortkey="Name", ) - desired_log = [ - log for log in lc.records if "optimize your queries" in log.msg - ][0] + desired_log = [log for log in lc.records if "optimize your queries" in log.msg][0] self.assertTrue("DIST" in desired_log.msg) self.assertFalse("SORT" in desired_log.msg) @@ -544,12 +516,8 @@ def test_upsert(self): self.rs.upsert(upsert_tbl, f"{self.temp_schema}.test_copy", "ID") # Make sure that it is the expected table - expected_tbl = Table( - [["id", "name"], [1, "Jane"], [2, "John"], [3, "Sarah"], [5, "Bob"]] - ) - updated_tbl = self.rs.query( - f"select * from {self.temp_schema}.test_copy order by id;" - ) + expected_tbl = Table([["id", "name"], [1, "Jane"], [2, "John"], [3, "Sarah"], [5, "Bob"]]) + updated_tbl = self.rs.query(f"select * from {self.temp_schema}.test_copy order by id;") assert_matching_tables(expected_tbl, updated_tbl) # Try to run it with a bad primary key @@ -577,9 +545,7 @@ def test_upsert(self): [1, "Jane"], ] ) - updated_tbl = self.rs.query( - f"select * from {self.temp_schema}.test_copy order by id;" - ) + updated_tbl = self.rs.query(f"select * from {self.temp_schema}.test_copy order by id;") assert_matching_tables(expected_tbl, updated_tbl) # Try to run it with a bad primary key @@ -600,9 +566,7 @@ def test_upsert(self): # Make sure our table looks like we expect expected_tbl = Table([["id", "name"], [3, "600"], [6, "9999"]]) - updated_tbl = self.rs.query( - f"select * from {self.temp_schema}.test_copy order by id;" - ) + updated_tbl = self.rs.query(f"select * from {self.temp_schema}.test_copy order by id;") assert_matching_tables(expected_tbl, updated_tbl) # Run upsert requiring column resize @@ -613,9 +577,7 @@ def test_upsert(self): expected_tbl = Table( [["id", "name"], [3, "600"], [6, "9999"], [7, "this name is very long"]] ) - updated_tbl = self.rs.query( - f"select * from {self.temp_schema}.test_copy order by id;" - ) + updated_tbl = self.rs.query(f"select * from {self.temp_schema}.test_copy order by id;") assert_matching_tables(expected_tbl, updated_tbl) def 
test_unload(self): @@ -668,18 +630,10 @@ def test_to_from_redshift(self): def test_generate_manifest(self): # Add some tables to buckets - self.tbl.to_s3_csv( - self.temp_s3_bucket, f"{self.temp_s3_prefix}test_file_01.csv" - ) - self.tbl.to_s3_csv( - self.temp_s3_bucket, f"{self.temp_s3_prefix}test_file_02.csv" - ) - self.tbl.to_s3_csv( - self.temp_s3_bucket, f"{self.temp_s3_prefix}test_file_03.csv" - ) - self.tbl.to_s3_csv( - self.temp_s3_bucket, f"{self.temp_s3_prefix}dont_include.csv" - ) + self.tbl.to_s3_csv(self.temp_s3_bucket, f"{self.temp_s3_prefix}test_file_01.csv") + self.tbl.to_s3_csv(self.temp_s3_bucket, f"{self.temp_s3_prefix}test_file_02.csv") + self.tbl.to_s3_csv(self.temp_s3_bucket, f"{self.temp_s3_prefix}test_file_03.csv") + self.tbl.to_s3_csv(self.temp_s3_bucket, f"{self.temp_s3_prefix}dont_include.csv") # Copy in a table to generate the headers and table self.rs.copy(self.tbl, f"{self.temp_schema}.test_copy", if_exists="drop") @@ -701,9 +655,7 @@ def test_generate_manifest(self): self.assertEqual(len(manifest["entries"]), 3) # Validate that manifest saved to bucket - keys = self.s3.list_keys( - self.temp_s3_bucket, prefix=f"{self.temp_s3_prefix}test_manifest" - ) + keys = self.s3.list_keys(self.temp_s3_bucket, prefix=f"{self.temp_s3_prefix}test_manifest") self.assertTrue(manifest_key in keys) def test_move_table(self): @@ -833,9 +785,7 @@ def test_union_tables(self): f"{self.temp_schema}.union_all", [f"{self.temp_schema}.union_base1", f"{self.temp_schema}.union_base2"], ) - self.assertEqual( - self.rs.query(f"select * from {self.temp_schema}.union_all").num_rows, 6 - ) + self.assertEqual(self.rs.query(f"select * from {self.temp_schema}.union_all").num_rows, 6) # Union the two tables and check row count self.rs.union_tables( @@ -843,9 +793,7 @@ def test_union_tables(self): [f"{self.temp_schema}.union_base1", f"{self.temp_schema}.union_base2"], union_all=False, ) - self.assertEqual( - self.rs.query(f"select * from {self.temp_schema}.union_test").num_rows, 3 - ) + self.assertEqual(self.rs.query(f"select * from {self.temp_schema}.union_test").num_rows, 3) def test_populate_table_from_query(self): # Populate the source table @@ -868,9 +816,7 @@ def test_populate_table_from_query(self): self.assertEqual(rows[0]["count"], 3) # Try with if_exists='drop', and a distkey - self.rs.populate_table_from_query( - query, dest_table, if_exists="drop", distkey="id" - ) + self.rs.populate_table_from_query(query, dest_table, if_exists="drop", distkey="id") rows = self.rs.query(f"select count(*) from {dest_table}") self.assertEqual(rows[0]["count"], 3) @@ -931,9 +877,7 @@ def test_duplicate_table(self): def test_get_max_value(self): - date_tbl = Table( - [["id", "date_modified"], [1, "2020-01-01"], [2, "1900-01-01"]] - ) + date_tbl = Table([["id", "date_modified"], [1, "2020-01-01"], [2, "1900-01-01"]]) self.rs.copy(date_tbl, f"{self.temp_schema}.test_date") # Test return string @@ -1098,9 +1042,7 @@ def test_alter_varchar_column_widths(self): # Base table 'Name' column has a width of 5. This should expand it to 6. 
self.rs.alter_varchar_column_widths(append_tbl, f"{self.temp_schema}.test") - self.assertEqual( - self.rs.get_columns(self.temp_schema, "test")["name"]["max_length"], 6 - ) + self.assertEqual(self.rs.get_columns(self.temp_schema, "test")["name"]["max_length"], 6) if __name__ == "__main__": diff --git a/test/test_donorbox/test_donorbox.py b/test/test_donorbox/test_donorbox.py index a8fe8c529a..35c7ea3fb6 100644 --- a/test/test_donorbox/test_donorbox.py +++ b/test/test_donorbox/test_donorbox.py @@ -243,9 +243,7 @@ def test_get_donations_with_date_from_filter_live_test(self): self.assertEqual(result[0]["donation_date"], "2022-10-20T19:33:31.744Z") # Try the other three formats quickly for date_string in ["2022/10/20", "20221020", "20-10-2022"]: - self.assertEqual( - self.donorbox.get_donations(date_from=date_string).num_rows, 1 - ) + self.assertEqual(self.donorbox.get_donations(date_from=date_string).num_rows, 1) # Incorrect formats raise error with self.assertRaises(ValueError): result = self.donorbox.get_donations(date_from="10 20 2022") @@ -260,9 +258,7 @@ def test_get_donations_with_date_to_filter_live_test(self): self.assertEqual(result[0]["donation_date"], "2022-10-19T18:19:06.044Z") # Try the other three formats quickly for date_string in ["2022/10/20", "20221020", "20-10-2022"]: - self.assertEqual( - self.donorbox.get_donations(date_to=date_string).num_rows, 2 - ) + self.assertEqual(self.donorbox.get_donations(date_to=date_string).num_rows, 2) # Incorrect formats raise error with self.assertRaises(ValueError): result = self.donorbox.get_donations(date_to="10 20 2022") @@ -332,15 +328,11 @@ def test_get_donations_with_amount_max_filter_live_test(self): @requests_mock.Mocker() def test_get_donors(self, m): - m.get( - self.base_uri + "/donors", json=donorbox_test_data.get_donors_response_json - ) + m.get(self.base_uri + "/donors", json=donorbox_test_data.get_donors_response_json) result = self.donorbox.get_donors() # Assert the method returns expected dict response - self.assertDictEqual( - result.to_dicts()[0], donorbox_test_data.get_donors_response_json[0] - ) + self.assertDictEqual(result.to_dicts()[0], donorbox_test_data.get_donors_response_json[0]) columns = [ "id", "created_at", diff --git a/test/test_etl.py b/test/test_etl.py index d6ed7ac8a4..7355f6def4 100644 --- a/test/test_etl.py +++ b/test/test_etl.py @@ -517,14 +517,10 @@ def test_unpack_nested_columns_as_rows_expanded(self): ] ) - expanded = test_table.unpack_nested_columns_as_rows( - "nested", expand_original=True - ) + expanded = test_table.unpack_nested_columns_as_rows("nested", expand_original=True) # Check that the columns are as expected - self.assertEqual( - ["uid", "id", "extra", "nested", "nested_value"], expanded.columns - ) + self.assertEqual(["uid", "id", "extra", "nested", "nested_value"], expanded.columns) # Check that the row count is as expected self.assertEqual(expanded.num_rows, 12) @@ -540,9 +536,7 @@ def test_cut(self): def test_row_select(self): - tbl = Table( - [["foo", "bar", "baz"], ["c", 4, 9.3], ["a", 2, 88.2], ["b", 1, 23.3]] - ) + tbl = Table([["foo", "bar", "baz"], ["c", 4, 9.3], ["a", 2, 88.2], ["b", 1, 23.3]]) expected = Table([{"foo": "a", "bar": 2, "baz": 88.2}]) # Try with this method @@ -718,9 +712,7 @@ def test_match_columns(self): ) tbl.match_columns(desired_tbl.columns) desired_tbl = ( - Table(desired_raw) - .remove_column("middle_name") - .add_column("middle_name", index=1) + Table(desired_raw).remove_column("middle_name").add_column("middle_name", index=1) ) 
assert_matching_tables(desired_tbl, tbl) @@ -994,9 +986,7 @@ def test_deduplicate(self): assert_matching_tables(tbl_expected, tbl) # Confirm deduplicate works for multiple keys - tbl = Table( - [["a", "b", "c"], [1, 2, 3], [1, 2, 3], [1, 2, 4], [1, 3, 2], [2, 3, 4]] - ) + tbl = Table([["a", "b", "c"], [1, 2, 3], [1, 2, 3], [1, 2, 4], [1, 3, 2], [2, 3, 4]]) tbl_expected = Table([["a", "b", "c"], [1, 2, 3], [1, 3, 2], [2, 3, 4]]) tbl.deduplicate(["a", "b"]) assert_matching_tables(tbl_expected, tbl) diff --git a/test/test_facebook_ads.py b/test/test_facebook_ads.py index 7a0fe828a5..4b8d34adb2 100644 --- a/test/test_facebook_ads.py +++ b/test/test_facebook_ads.py @@ -26,9 +26,7 @@ ) -@unittest.skipIf( - not os.environ.get("LIVE_TEST"), "Skipping because not running live test" -) +@unittest.skipIf(not os.environ.get("LIVE_TEST"), "Skipping because not running live test") class TestFacebookAdsIntegration(unittest.TestCase): def setUp(self): @@ -66,31 +64,21 @@ def test_add_users_to_custom_audience_no_valid_columns(self): {"full name": "Bob Smith"}, ] ) - self.assertRaises( - KeyError, self.fb_ads.add_users_to_custom_audience, self.audience_id, tbl - ) + self.assertRaises(KeyError, self.fb_ads.add_users_to_custom_audience, self.audience_id, tbl) class TestFacebookAdsUtilities(unittest.TestCase): def test_get_match_key_for_column(self): # Test just a few of the mappings self.assertEqual("EMAIL", FacebookAds._get_match_key_for_column("email")) - self.assertEqual( - "EMAIL", FacebookAds._get_match_key_for_column("voterbase_email") - ) + self.assertEqual("EMAIL", FacebookAds._get_match_key_for_column("voterbase_email")) self.assertEqual("FN", FacebookAds._get_match_key_for_column("first name")) self.assertEqual("FN", FacebookAds._get_match_key_for_column("FIRST-NAME ")) - self.assertEqual( - "FN", FacebookAds._get_match_key_for_column("vb_tsmart_first_name") - ) + self.assertEqual("FN", FacebookAds._get_match_key_for_column("vb_tsmart_first_name")) self.assertEqual("LN", FacebookAds._get_match_key_for_column("Last Name!")) self.assertEqual("ST", FacebookAds._get_match_key_for_column("state code")) - self.assertEqual( - "ST", FacebookAds._get_match_key_for_column("vb_vf_source_state") - ) - self.assertEqual( - "GEN", FacebookAds._get_match_key_for_column("vb_voterbase_gender") - ) + self.assertEqual("ST", FacebookAds._get_match_key_for_column("vb_vf_source_state")) + self.assertEqual("GEN", FacebookAds._get_match_key_for_column("vb_voterbase_gender")) self.assertEqual( "PHONE", FacebookAds._get_match_key_for_column("vb_voterbase_phone_wireless"), diff --git a/test/test_geocoder/test_census_geocoder.py b/test/test_geocoder/test_census_geocoder.py index 227aee1ec9..8b16ac1d0f 100644 --- a/test/test_geocoder/test_census_geocoder.py +++ b/test/test_geocoder/test_census_geocoder.py @@ -7,9 +7,7 @@ from test.utils import assert_matching_tables -@unittest.skipIf( - not os.environ.get("LIVE_TEST"), "Skipping because not running live test" -) +@unittest.skipIf(not os.environ.get("LIVE_TEST"), "Skipping because not running live test") class TestCensusGeocoder(unittest.TestCase): def setUp(self): diff --git a/test/test_github/test_github.py b/test/test_github/test_github.py index fd0da7b991..04d6ea4cc1 100644 --- a/test/test_github/test_github.py +++ b/test/test_github/test_github.py @@ -34,9 +34,7 @@ def test_get_repo(self, m): def test_list_repo_issues(self, m): with open(os.path.join(_dir, "test_data", "test_get_repo.json"), "r") as f: m.get("https://api.github.com:443/repos/octocat/Hello-World", 
text=f.read()) - with open( - os.path.join(_dir, "test_data", "test_list_repo_issues.json"), "r" - ) as f: + with open(os.path.join(_dir, "test_data", "test_list_repo_issues.json"), "r") as f: m.get( "https://api.github.com:443/repos/octocat/Hello-World/issues", text=f.read(), @@ -57,9 +55,7 @@ def test_download_file(self, m): text=f.read(), ) - file_path = self.github.download_file( - "octocat/Hello-World", "data.csv", branch="testing" - ) + file_path = self.github.download_file("octocat/Hello-World", "data.csv", branch="testing") with open(file_path, "r") as f: file_contents = f.read() diff --git a/test/test_gmail/test_gmail.py b/test/test_gmail/test_gmail.py index 5ac6902d4c..35fc992774 100644 --- a/test/test_gmail/test_gmail.py +++ b/test/test_gmail/test_gmail.py @@ -84,9 +84,7 @@ def test_create_message_simple(self): msg = self.gmail._create_message_simple(sender, to, subject, message_text) raw = self.gmail._encode_raw_message(msg) - decoded = email.message_from_bytes( - base64.urlsafe_b64decode(bytes(raw["raw"], "utf-8")) - ) + decoded = email.message_from_bytes(base64.urlsafe_b64decode(bytes(raw["raw"], "utf-8"))) expected_items = [ ("Content-Type", 'text/plain; charset="us-ascii"'), @@ -114,14 +112,10 @@ def test_create_message_html(self): message_text = "The is the message text of the email" message_html = "

<p>This is the html message part of the email</p>
" - msg = self.gmail._create_message_html( - sender, to, subject, message_text, message_html - ) + msg = self.gmail._create_message_html(sender, to, subject, message_text, message_html) raw = self.gmail._encode_raw_message(msg) - decoded = email.message_from_bytes( - base64.urlsafe_b64decode(bytes(raw["raw"], "utf-8")) - ) + decoded = email.message_from_bytes(base64.urlsafe_b64decode(bytes(raw["raw"], "utf-8"))) expected_items = [ ("Content-Type", "multipart/alternative;\n boundary="), @@ -135,12 +129,8 @@ def test_create_message_html(self): # avoid failures updated_items = [] for i in decoded.items(): - if ( - "Content-Type" in i[0] and "multipart/alternative;\n boundary=" in i[1] - ): # noqa - updated_items.append( - ("Content-Type", "multipart/alternative;\n boundary=") - ) + if "Content-Type" in i[0] and "multipart/alternative;\n boundary=" in i[1]: # noqa + updated_items.append(("Content-Type", "multipart/alternative;\n boundary=")) else: updated_items.append((i[0], i[1])) @@ -167,9 +157,7 @@ def test_create_message_html_no_text(self): msg = self.gmail._create_message_html(sender, to, subject, "", message_html) raw = self.gmail._encode_raw_message(msg) - decoded = email.message_from_bytes( - base64.urlsafe_b64decode(bytes(raw["raw"], "utf-8")) - ) + decoded = email.message_from_bytes(base64.urlsafe_b64decode(bytes(raw["raw"], "utf-8"))) expected_items = [ ("Content-Type", "multipart/alternative;\n boundary="), @@ -183,12 +171,8 @@ def test_create_message_html_no_text(self): # avoid failures updated_items = [] for i in decoded.items(): - if ( - "Content-Type" in i[0] and "multipart/alternative;\n boundary=" in i[1] - ): # noqa - updated_items.append( - ("Content-Type", "multipart/alternative;\n boundary=") - ) + if "Content-Type" in i[0] and "multipart/alternative;\n boundary=" in i[1]: # noqa + updated_items.append(("Content-Type", "multipart/alternative;\n boundary=")) else: updated_items.append((i[0], i[1])) @@ -210,9 +194,7 @@ def test_create_message_attachments(self): to = "Recepient " subject = "This is a test email with attachements" message_text = "The is the message text of the email with attachments" - message_html = ( - "

<p> This is the html message part of the email " "with attachments </p>
" - ) + message_html = "

<p> This is the html message part of the email " "with attachments </p>
" attachments = [f"{_dir}/assets/loremipsum.txt"] msg = self.gmail._create_message_attachments( @@ -220,9 +202,7 @@ def test_create_message_attachments(self): ) raw = self.gmail._encode_raw_message(msg) - decoded = email.message_from_bytes( - base64.urlsafe_b64decode(bytes(raw["raw"], "utf-8")) - ) + decoded = email.message_from_bytes(base64.urlsafe_b64decode(bytes(raw["raw"], "utf-8"))) expected_items = [ ("Content-Type", "multipart/alternative;\n boundary="), @@ -236,12 +216,8 @@ def test_create_message_attachments(self): # avoid failures updated_items = [] for i in decoded.items(): - if ( - "Content-Type" in i[0] and "multipart/alternative;\n boundary=" in i[1] - ): # noqa - updated_items.append( - ("Content-Type", "multipart/alternative;\n boundary=") - ) + if "Content-Type" in i[0] and "multipart/alternative;\n boundary=" in i[1]: # noqa + updated_items.append(("Content-Type", "multipart/alternative;\n boundary=")) else: updated_items.append((i[0], i[1])) @@ -275,9 +251,7 @@ def test_create_message_attachments_jpeg(self): to = "Recepient " subject = "This is a test email with attachements" message_text = "The is the message text of the email with attachments" - message_html = ( - "

<p> This is the html message part of the email " "with attachments </p>
" - ) + message_html = "

<p> This is the html message part of the email " "with attachments </p>
" attachments = [f"{_dir}/assets/loremipsum.jpeg"] msg = self.gmail._create_message_attachments( @@ -285,9 +259,7 @@ def test_create_message_attachments_jpeg(self): ) raw = self.gmail._encode_raw_message(msg) - decoded = email.message_from_bytes( - base64.urlsafe_b64decode(bytes(raw["raw"], "utf-8")) - ) + decoded = email.message_from_bytes(base64.urlsafe_b64decode(bytes(raw["raw"], "utf-8"))) expected_items = [ ("Content-Type", "multipart/alternative;\n boundary="), @@ -301,12 +273,8 @@ def test_create_message_attachments_jpeg(self): # avoid failures updated_items = [] for i in decoded.items(): - if ( - "Content-Type" in i[0] and "multipart/alternative;\n boundary=" in i[1] - ): # noqa - updated_items.append( - ("Content-Type", "multipart/alternative;\n boundary=") - ) + if "Content-Type" in i[0] and "multipart/alternative;\n boundary=" in i[1]: # noqa + updated_items.append(("Content-Type", "multipart/alternative;\n boundary=")) else: updated_items.append((i[0], i[1])) @@ -337,9 +305,7 @@ def test_create_message_attachments_m4a(self): to = "Recepient " subject = "This is a test email with attachements" message_text = "The is the message text of the email with attachments" - message_html = ( - "

<p> This is the html message part of the email " "with attachments </p>
" - ) + message_html = "

<p> This is the html message part of the email " "with attachments </p>
" attachments = [f"{_dir}/assets/loremipsum.m4a"] msg = self.gmail._create_message_attachments( @@ -347,9 +313,7 @@ def test_create_message_attachments_m4a(self): ) raw = self.gmail._encode_raw_message(msg) - decoded = email.message_from_bytes( - base64.urlsafe_b64decode(bytes(raw["raw"], "utf-8")) - ) + decoded = email.message_from_bytes(base64.urlsafe_b64decode(bytes(raw["raw"], "utf-8"))) expected_items = [ ("Content-Type", "multipart/alternative;\n boundary="), @@ -363,12 +327,8 @@ def test_create_message_attachments_m4a(self): # avoid failures updated_items = [] for i in decoded.items(): - if ( - "Content-Type" in i[0] and "multipart/alternative;\n boundary=" in i[1] - ): # noqa - updated_items.append( - ("Content-Type", "multipart/alternative;\n boundary=") - ) + if "Content-Type" in i[0] and "multipart/alternative;\n boundary=" in i[1]: # noqa + updated_items.append(("Content-Type", "multipart/alternative;\n boundary=")) else: updated_items.append((i[0], i[1])) @@ -397,9 +357,7 @@ def test_create_message_attachments_mp3(self): to = "Recepient " subject = "This is a test email with attachements" message_text = "The is the message text of the email with attachments" - message_html = ( - "

<p> This is the html message part of the email " "with attachments </p>
" - ) + message_html = "

<p> This is the html message part of the email " "with attachments </p>
" attachments = [f"{_dir}/assets/loremipsum.mp3"] msg = self.gmail._create_message_attachments( @@ -407,9 +365,7 @@ def test_create_message_attachments_mp3(self): ) raw = self.gmail._encode_raw_message(msg) - decoded = email.message_from_bytes( - base64.urlsafe_b64decode(bytes(raw["raw"], "utf-8")) - ) + decoded = email.message_from_bytes(base64.urlsafe_b64decode(bytes(raw["raw"], "utf-8"))) expected_items = [ ("Content-Type", "multipart/alternative;\n boundary="), @@ -423,12 +379,8 @@ def test_create_message_attachments_mp3(self): # avoid failures updated_items = [] for i in decoded.items(): - if ( - "Content-Type" in i[0] and "multipart/alternative;\n boundary=" in i[1] - ): # noqa - updated_items.append( - ("Content-Type", "multipart/alternative;\n boundary=") - ) + if "Content-Type" in i[0] and "multipart/alternative;\n boundary=" in i[1]: # noqa + updated_items.append(("Content-Type", "multipart/alternative;\n boundary=")) else: updated_items.append((i[0], i[1])) @@ -457,9 +409,7 @@ def test_create_message_attachments_mp4(self): to = "Recepient " subject = "This is a test email with attachements" message_text = "The is the message text of the email with attachments" - message_html = ( - "

<p> This is the html message part of the email " "with attachments </p>
" - ) + message_html = "

<p> This is the html message part of the email " "with attachments </p>
" attachments = [f"{_dir}/assets/loremipsum.mp4"] msg = self.gmail._create_message_attachments( @@ -467,9 +417,7 @@ def test_create_message_attachments_mp4(self): ) raw = self.gmail._encode_raw_message(msg) - decoded = email.message_from_bytes( - base64.urlsafe_b64decode(bytes(raw["raw"], "utf-8")) - ) + decoded = email.message_from_bytes(base64.urlsafe_b64decode(bytes(raw["raw"], "utf-8"))) expected_items = [ ("Content-Type", "multipart/alternative;\n boundary="), @@ -483,12 +431,8 @@ def test_create_message_attachments_mp4(self): # avoid failures updated_items = [] for i in decoded.items(): - if ( - "Content-Type" in i[0] and "multipart/alternative;\n boundary=" in i[1] - ): # noqa - updated_items.append( - ("Content-Type", "multipart/alternative;\n boundary=") - ) + if "Content-Type" in i[0] and "multipart/alternative;\n boundary=" in i[1]: # noqa + updated_items.append(("Content-Type", "multipart/alternative;\n boundary=")) else: updated_items.append((i[0], i[1])) @@ -517,9 +461,7 @@ def test_create_message_attachments_pdf(self): to = "Recepient " subject = "This is a test email with attachements" message_text = "The is the message text of the email with attachments" - message_html = ( - "

<p> This is the html message part of the email " "with attachments </p>
" - ) + message_html = "

<p> This is the html message part of the email " "with attachments </p>
" attachments = [f"{_dir}/assets/loremipsum.pdf"] msg = self.gmail._create_message_attachments( @@ -528,9 +470,7 @@ def test_create_message_attachments_pdf(self): raw = self.gmail._encode_raw_message(msg) - decoded = email.message_from_bytes( - base64.urlsafe_b64decode(bytes(raw["raw"], "utf-8")) - ) + decoded = email.message_from_bytes(base64.urlsafe_b64decode(bytes(raw["raw"], "utf-8"))) expected_items = [ ("Content-Type", "multipart/alternative;\n boundary="), @@ -544,12 +484,8 @@ def test_create_message_attachments_pdf(self): # avoid failures updated_items = [] for i in decoded.items(): - if ( - "Content-Type" in i[0] and "multipart/alternative;\n boundary=" in i[1] - ): # noqa - updated_items.append( - ("Content-Type", "multipart/alternative;\n boundary=") - ) + if "Content-Type" in i[0] and "multipart/alternative;\n boundary=" in i[1]: # noqa + updated_items.append(("Content-Type", "multipart/alternative;\n boundary=")) else: updated_items.append((i[0], i[1])) @@ -588,9 +524,7 @@ def test__validate_email_string(self): if e["expected"]: self.assertTrue(self.gmail._validate_email_string(e["email"])) else: - self.assertRaises( - ValueError, self.gmail._validate_email_string, e["email"] - ) + self.assertRaises(ValueError, self.gmail._validate_email_string, e["email"]) # TODO test sending emails diff --git a/test/test_google/googlecivic_responses.py b/test/test_google/googlecivic_responses.py index e1e76c1141..5994e3bd21 100644 --- a/test/test_google/googlecivic_responses.py +++ b/test/test_google/googlecivic_responses.py @@ -544,9 +544,7 @@ "name": "Thomas J. Dart", "party": "Democratic", "candidateUrl": "http://www.sherifftomdart.com/", - "channels": [ - {"type": "Twitter", "id": "https://twitter.com/TomDart"} - ], + "channels": [{"type": "Twitter", "id": "https://twitter.com/TomDart"}], } ], "sources": [{"name": "Ballot Information Project", "official": False}], @@ -937,9 +935,7 @@ "name": "California's 16th congressional district", "officeIndices": [3], }, - "ocd-division/country:us/state:ca/place:mountain_view": { - "name": "Mountain View city" - }, + "ocd-division/country:us/state:ca/place:mountain_view": {"name": "Mountain View city"}, "ocd-division/country:us/state:ca/sldl:23": { "name": "California Assembly district 23", "officeIndices": [12], @@ -1109,9 +1105,7 @@ }, { "name": "Alex Padilla", - "address": [ - {"line1": "B03", "city": "Washington", "state": "DC", "zip": "20510"} - ], + "address": [{"line1": "B03", "city": "Washington", "state": "DC", "zip": "20510"}], "party": "Democratic Party", "phones": ["(202) 224-3553"], "urls": [ diff --git a/test/test_google/test_google_cloud_storage.py b/test/test_google/test_google_cloud_storage.py index a264c701e5..a047544b4e 100644 --- a/test/test_google/test_google_cloud_storage.py +++ b/test/test_google/test_google_cloud_storage.py @@ -10,9 +10,7 @@ TEMP_FILE_NAME = "tmp_file_01.txt" -@unittest.skipIf( - not os.environ.get("LIVE_TEST"), "Skipping because not running live test" -) +@unittest.skipIf(not os.environ.get("LIVE_TEST"), "Skipping because not running live test") class TestGoogleStorageBuckets(unittest.TestCase): def setUp(self): @@ -46,9 +44,7 @@ def test_bucket_exists(self): def test_get_bucket(self): # Assert that a bucket object is returned - self.assertIsInstance( - self.cloud.get_bucket(TEMP_BUCKET_NAME), storage.bucket.Bucket - ) + self.assertIsInstance(self.cloud.get_bucket(TEMP_BUCKET_NAME), storage.bucket.Bucket) def test_create_bucket(self): diff --git a/test/test_google/test_google_sheets.py 
b/test/test_google/test_google_sheets.py index 7df50ddf94..d7672b76a7 100644 --- a/test/test_google/test_google_sheets.py +++ b/test/test_google/test_google_sheets.py @@ -6,9 +6,7 @@ from test.utils import assert_matching_tables -@unittest.skipIf( - not os.environ.get("LIVE_TEST"), "Skipping because not running live test" -) +@unittest.skipIf(not os.environ.get("LIVE_TEST"), "Skipping because not running live test") class TestGoogleSheets(unittest.TestCase): def setUp(self): @@ -31,9 +29,7 @@ def setUp(self): {"city": "Chicago", "state": "IL"}, ] ) - self.google_sheets.overwrite_sheet( - self.spreadsheet_id, self.second_test_table, 1 - ) + self.google_sheets.overwrite_sheet(self.spreadsheet_id, self.second_test_table, 1) def tearDown(self): # self.google_sheets.delete_spreadsheet(self.spreadsheet_id) @@ -41,24 +37,18 @@ def tearDown(self): def test_read_worksheet(self): # This is the spreadsheet called "Legislators 2017 (Test sheet for Parsons)" - table = self.google_sheets.get_worksheet( - "1Y_pZxz-8JZ9QBdq1pXuIk2js_VXeymOUoZhUp1JVEg8" - ) + table = self.google_sheets.get_worksheet("1Y_pZxz-8JZ9QBdq1pXuIk2js_VXeymOUoZhUp1JVEg8") self.assertEqual(541, table.num_rows) def test_read_sheet(self): # Deprecated in Parsons v0.14 # This is the spreadsheet called "Legislators 2017 (Test sheet for Parsons)" - table = self.google_sheets.read_sheet( - "1Y_pZxz-8JZ9QBdq1pXuIk2js_VXeymOUoZhUp1JVEg8" - ) + table = self.google_sheets.read_sheet("1Y_pZxz-8JZ9QBdq1pXuIk2js_VXeymOUoZhUp1JVEg8") self.assertEqual(541, table.num_rows) def test_read_nonexistent_worksheet(self): - self.assertRaises( - gspread.exceptions.APIError, self.google_sheets.read_sheet, "abc123" - ) + self.assertRaises(gspread.exceptions.APIError, self.google_sheets.read_sheet, "abc123") def test_create_spreadsheet(self): # Created as part of setUp @@ -67,9 +57,7 @@ def test_create_spreadsheet(self): def test_add_sheet(self): # Sheet added as part of setUp # Also tests get_sheet_index_with_title - idx = self.google_sheets.get_worksheet_index( - self.spreadsheet_id, self.second_sheet_title - ) + idx = self.google_sheets.get_worksheet_index(self.spreadsheet_id, self.second_sheet_title) self.assertEqual(1, idx) def test_get_sheet_index_with_bogus_title(self): @@ -81,9 +69,7 @@ def test_get_sheet_index_with_bogus_title(self): ) def test_read_worksheet_with_title(self): - table = self.google_sheets.get_worksheet( - self.spreadsheet_id, self.second_sheet_title - ) + table = self.google_sheets.get_worksheet(self.spreadsheet_id, self.second_sheet_title) self.assertEqual(self.second_test_table.columns, table.columns) def test_append_to_spreadsheet(self): @@ -99,9 +85,7 @@ def test_append_to_spreadsheet(self): self.assertEqual(append_table.columns, result_table.columns) # We should now have rows from both tables - self.assertEqual( - self.test_table.num_rows + append_table.num_rows, result_table.num_rows - ) + self.assertEqual(self.test_table.num_rows + append_table.num_rows, result_table.num_rows) # First check that we didn't muck with the original data for i in range(self.test_table.num_rows): @@ -110,9 +94,7 @@ def test_append_to_spreadsheet(self): # Then check that we appended the data properly for i in range(append_table.num_rows): - self.assertEqual( - append_table.data[i], result_table.data[orig_row_count + i] - ) + self.assertEqual(append_table.data[i], result_table.data[orig_row_count + i]) # Test that we can append to an empty sheet self.google_sheets.add_sheet(self.spreadsheet_id, "Sheet3") @@ -160,7 +142,5 @@ def 
test_share_spreadsheet(self): self.google_sheets.share_spreadsheet( self.spreadsheet_id, "bob@bob.com", role="reader", notify=True ) - permissions = self.google_sheets.get_spreadsheet_permissions( - self.spreadsheet_id - ) + permissions = self.google_sheets.get_spreadsheet_permissions(self.spreadsheet_id) self.assertIn("bob@bob.com", permissions["emailAddress"]) diff --git a/test/test_google/test_utilities.py b/test/test_google/test_utilities.py index 618b28dba5..5a6b8c2966 100644 --- a/test/test_google/test_utilities.py +++ b/test/test_google/test_utilities.py @@ -37,9 +37,7 @@ def test_noop_if_env_already_set(self): self.assertEqual(os.environ[self.TEST_ENV_NAME], self.cred_path) def test_accepts_dictionary(self): - util.setup_google_application_credentials( - self.cred_contents, self.TEST_ENV_NAME - ) + util.setup_google_application_credentials(self.cred_contents, self.TEST_ENV_NAME) actual = os.environ[self.TEST_ENV_NAME] self.assertTrue(os.path.exists(actual)) with open(actual, "r") as f: @@ -62,9 +60,7 @@ def test_accepts_file_path(self): def test_credentials_are_valid_after_double_call(self): # write creds to tmp file... - util.setup_google_application_credentials( - self.cred_contents, self.TEST_ENV_NAME - ) + util.setup_google_application_credentials(self.cred_contents, self.TEST_ENV_NAME) fst = os.environ[self.TEST_ENV_NAME] # repeat w/ default args... diff --git a/test/test_hustle/test_hustle.py b/test/test_hustle/test_hustle.py index 22393661a0..e561977188 100644 --- a/test/test_hustle/test_hustle.py +++ b/test/test_hustle/test_hustle.py @@ -17,9 +17,7 @@ def setUp(self, m): @requests_mock.Mocker() def test_auth_token(self, m): - self.assertEqual( - self.hustle.auth_token, expected_json.auth_token["access_token"] - ) + self.assertEqual(self.hustle.auth_token, expected_json.auth_token["access_token"]) @requests_mock.Mocker() def test_get_organizations(self, m): @@ -48,9 +46,7 @@ def test_get_group(self, m): @requests_mock.Mocker() def test_create_lead(self, m): m.post(HUSTLE_URI + "groups/cMCH0hxwGt/leads", json=expected_json.lead) - lead = self.hustle.create_lead( - "cMCH0hxwGt", "Barack", "5126993336", last_name="Obama" - ) + lead = self.hustle.create_lead("cMCH0hxwGt", "Barack", "5126993336", last_name="Obama") self.assertEqual(lead, expected_json.lead) @requests_mock.Mocker() @@ -132,15 +128,11 @@ def test_create_agent(self, m): @requests_mock.Mocker() def test_update_agent(self, m): m.put(HUSTLE_URI + "agents/CrJUBI1CF", json=expected_json.agent) - updated_agent = self.hustle.update_agent( - "CrJUBI1CF", name="Angela", full_name="Jones" - ) + updated_agent = self.hustle.update_agent("CrJUBI1CF", name="Angela", full_name="Jones") self.assertEqual(updated_agent, expected_json.agent) @requests_mock.Mocker() def test_create_group_membership(self, m): m.post(HUSTLE_URI + "groups/zajXdqtzRt/memberships", json=expected_json.group) - group_membership = self.hustle.create_group_membership( - "zajXdqtzRt", "A6ebDlAtqB" - ) + group_membership = self.hustle.create_group_membership("zajXdqtzRt", "A6ebDlAtqB") self.assertEqual(group_membership, expected_json.group) diff --git a/test/test_mobilecommons/test_mobilecommons.py b/test/test_mobilecommons/test_mobilecommons.py index dd80d8a562..8132a7a4df 100644 --- a/test/test_mobilecommons/test_mobilecommons.py +++ b/test/test_mobilecommons/test_mobilecommons.py @@ -24,9 +24,7 @@ class TestMobileCommons(unittest.TestCase): def setUp(self, m): self.base_uri = "https://secure.mcommons.com/api/" - self.mc = MobileCommons( - 
username=MOBILECOMMONS_USERNAME, password=MOBILECOMMONS_PASSWORD - ) + self.mc = MobileCommons(username=MOBILECOMMONS_USERNAME, password=MOBILECOMMONS_PASSWORD) @requests_mock.Mocker() def test_parse_get_request(self, m): @@ -75,8 +73,7 @@ def test_get_profiles(self, m): self.assertEqual( profiles[0]["first_name"], "James", - "MobileCommons.get_profiles method not returning a table structured" - "as expected", + "MobileCommons.get_profiles method not returning a table structured" "as expected", ) @requests_mock.Mocker() @@ -95,8 +92,7 @@ def test_get_broadcasts(self, m): self.assertEqual( broadcasts[0]["id"], "2543129", - "MobileCommons.get_broadcasts method not returning a table structured" - "as expected", + "MobileCommons.get_broadcasts method not returning a table structured" "as expected", ) @requests_mock.Mocker() diff --git a/test/test_mobilize/test_mobilize_america.py b/test/test_mobilize/test_mobilize_america.py index 3301c07f31..2609204408 100644 --- a/test/test_mobilize/test_mobilize_america.py +++ b/test/test_mobilize/test_mobilize_america.py @@ -134,6 +134,4 @@ def test_get_events_deleted(self, m): m.get(self.ma.uri + "events/deleted", json=test_json.GET_EVENTS_DELETED_JSON) # Assert response is expected structure - self.assertTrue( - validate_list(["id", "deleted_date"], self.ma.get_events_deleted()) - ) + self.assertTrue(validate_list(["id", "deleted_date"], self.ma.get_events_deleted())) diff --git a/test/test_mobilize/test_mobilize_json.py b/test/test_mobilize/test_mobilize_json.py index 6f7ea324d9..37aa0a6a3b 100644 --- a/test/test_mobilize/test_mobilize_json.py +++ b/test/test_mobilize/test_mobilize_json.py @@ -3,9 +3,7 @@ GET_ORGANIZATIONS_JSON = { "count": 38, "next": None, - "previous": ( - "https://events.mobilizeamerica.io/api/v1/organizations?updated_since=1543644000" - ), + "previous": ("https://events.mobilizeamerica.io/api/v1/organizations?updated_since=1543644000"), "data": [ { "id": 1251, @@ -58,9 +56,7 @@ "created_date": 1538590930, "modified_date": 1546468308, }, - "timeslots": [ - {"id": 526226, "start_date": 1547330400, "end_date": 1547335800} - ], + "timeslots": [{"id": 526226, "start_date": 1547330400, "end_date": 1547335800}], "location": { "venue": "Harris County Democratic Party HQ", "address_lines": ["4619 Lyons Ave", ""], @@ -75,9 +71,7 @@ "event_type": "TRAINING", "created_date": 1546469706, "modified_date": 1547335800, - "browser_url": ( - "https://events.mobilizeamerica.io/battlegroundtexas/event/86738/" - ), + "browser_url": ("https://events.mobilizeamerica.io/battlegroundtexas/event/86738/"), "high_priority": None, "contact": None, "visibility": "PUBLIC", diff --git a/test/test_nation_builder/test_nation_builder.py b/test/test_nation_builder/test_nation_builder.py index 83c52ffe6f..c09ed02f39 100644 --- a/test/test_nation_builder/test_nation_builder.py +++ b/test/test_nation_builder/test_nation_builder.py @@ -92,9 +92,9 @@ def test_get_people_with_next(self, m): nb = NB("test-slug", "test-token") GET_PEOPLE_RESPONSE_WITH_NEXT = GET_PEOPLE_RESPONSE.copy() - GET_PEOPLE_RESPONSE_WITH_NEXT[ - "next" - ] = "https://test-slug.nationbuilder.com/api/v1/people?limit=100&__nonce=bar&__token=baz" + GET_PEOPLE_RESPONSE_WITH_NEXT["next"] = ( + "https://test-slug.nationbuilder.com/api/v1/people?limit=100&__nonce=bar&__token=baz" + ) m.get( "https://test-slug.nationbuilder.com/api/v1/people", diff --git a/test/test_p2a.py b/test/test_p2a.py index 3116447a66..3f2f9677b3 100644 --- a/test/test_p2a.py +++ b/test/test_p2a.py @@ -59,9 +59,7 @@ }, ], "fields": [], 
- "phones": [ - {"id": 10537860, "address": "+19995206447", "subscribed": "false"} - ], + "phones": [{"id": 10537860, "address": "+19995206447", "subscribed": "false"}], "emails": [ {"id": 10537871, "address": "N@k.com", "subscribed": "false"}, {"id": 10950446, "address": "email@me.com", "subscribed": "false"}, @@ -188,9 +186,7 @@ def test_get_advocates(self, m): "memberships_name", "memberships_source", ] - self.assertTrue( - validate_list(member_exp, self.p2a.get_advocates()["memberships"]) - ) + self.assertTrue(validate_list(member_exp, self.p2a.get_advocates()["memberships"])) fields_exp = ["advocate_id", "fields"] self.assertTrue(validate_list(fields_exp, self.p2a.get_advocates()["fields"])) @@ -259,24 +255,18 @@ def test_create_advocate(self, m): # Test arg validation - create requires a phone or an email self.assertRaises( ValueError, - lambda: self.p2a.create_advocate( - campaigns=[1], firstname="Foo", lastname="bar" - ), + lambda: self.p2a.create_advocate(campaigns=[1], firstname="Foo", lastname="bar"), ) # Test arg validation - sms opt in requires a phone self.assertRaises( ValueError, - lambda: self.p2a.create_advocate( - campaigns=[1], email="foo@bar.com", sms_optin=True - ), + lambda: self.p2a.create_advocate(campaigns=[1], email="foo@bar.com", sms_optin=True), ) # Test arg validation - email opt in requires a email self.assertRaises( ValueError, - lambda: self.p2a.create_advocate( - campaigns=[1], phone="1234567890", email_optin=True - ), + lambda: self.p2a.create_advocate(campaigns=[1], phone="1234567890", email_optin=True), ) # Test a successful call diff --git a/test/test_pdi/test_events.py b/test/test_pdi/test_events.py index 6f2de7656c..36791e8b98 100644 --- a/test/test_pdi/test_events.py +++ b/test/test_pdi/test_events.py @@ -1,7 +1,6 @@ from test.utils import mark_live_test from parsons import Table - ##### START_DATE = "2020-01-01" @@ -18,7 +17,7 @@ def test_get_calendars(live_pdi): response = live_pdi.get_calendars() - assert type(response) == Table + assert isinstance(response, Table) @mark_live_test @@ -32,7 +31,7 @@ def test_get_calendars_with_limit(live_pdi): def test_get_event_activities(live_pdi): response = live_pdi.get_event_activities(start_date=START_DATE, end_date=END_DATE) - assert type(response) == Table + assert isinstance(response, Table) @mark_live_test @@ -50,7 +49,7 @@ def test_get_event_activity_assignments(live_pdi): start_date=START_DATE, end_date=END_DATE, expand=EXPAND ) - assert type(response) == Table + assert isinstance(response, Table) @mark_live_test diff --git a/test/test_pdi/test_pdi.py b/test/test_pdi/test_pdi.py index 6343515853..52cc7bb383 100644 --- a/test/test_pdi/test_pdi.py +++ b/test/test_pdi/test_pdi.py @@ -20,6 +20,7 @@ def remove_from_env(*env_vars): # Tests # + # Need to provide environment variables # PDI_USERNAME, PDI_PASSWORD, PDI_API_TOKEN @mark_live_test diff --git a/test/test_quickbase/test_quickbase.py b/test/test_quickbase/test_quickbase.py index 8c23d75e4b..1f7317a483 100644 --- a/test/test_quickbase/test_quickbase.py +++ b/test/test_quickbase/test_quickbase.py @@ -9,9 +9,7 @@ class TestQuickbase(unittest.TestCase): def test_get_app_tables(self, m): qb = Quickbase(hostname="test.example.com", user_token="12345") - m.get( - f"{qb.api_hostname}/tables?appId=test", json=test_data.test_get_app_tables - ) + m.get(f"{qb.api_hostname}/tables?appId=test", json=test_data.test_get_app_tables) tbl = qb.get_app_tables(app_id="test") self.assertEqual(tbl.num_rows, 2) diff --git a/test/test_redash.py b/test/test_redash.py index 
a81a23882f..b3c92cc30d 100644 --- a/test/test_redash.py +++ b/test/test_redash.py @@ -54,15 +54,11 @@ def test_update_data_source(self, m): def test_cached_query(self, m): redash = Redash(BASE_URL) # no user_api_key m.get(f"{BASE_URL}/api/queries/5/results.csv", text=self.mock_data) - assert_matching_tables( - redash.get_cached_query_results(5, API_KEY), self.mock_result - ) + assert_matching_tables(redash.get_cached_query_results(5, API_KEY), self.mock_result) self.assertEqual(m._adapter.last_request.path, "/api/queries/5/results.csv") self.assertEqual(m._adapter.last_request.query, "api_key=abc123") - assert_matching_tables( - self.redash.get_cached_query_results(5), self.mock_result - ) + assert_matching_tables(self.redash.get_cached_query_results(5), self.mock_result) self.assertEqual(m._adapter.last_request.query, "") @requests_mock.Mocker() @@ -79,9 +75,7 @@ def test_refresh_query(self, m): @requests_mock.Mocker() def test_refresh_query_poll(self, m): - m.post( - f"{BASE_URL}/api/queries/5/refresh", json={"job": {"id": 66, "status": 1}} - ) + m.post(f"{BASE_URL}/api/queries/5/refresh", json={"job": {"id": 66, "status": 1}}) m.get( f"{BASE_URL}/api/jobs/66", json={"job": {"id": 66, "status": 3, "query_result_id": 21}}, @@ -95,9 +89,7 @@ def test_refresh_query_poll(self, m): @requests_mock.Mocker() def test_refresh_query_poll_timeout(self, m): - m.post( - f"{BASE_URL}/api/queries/5/refresh", json={"job": {"id": 66, "status": 1}} - ) + m.post(f"{BASE_URL}/api/queries/5/refresh", json={"job": {"id": 66, "status": 1}}) m.get(f"{BASE_URL}/api/jobs/66", json={"job": {"id": 66, "status": 1}}) m.get(f"{BASE_URL}/api/queries/5/results/21.csv", text=self.mock_data) diff --git a/test/test_s3.py b/test/test_s3.py index fbc8054440..fca2f656f0 100644 --- a/test/test_s3.py +++ b/test/test_s3.py @@ -11,9 +11,7 @@ # to run properly. 
-@unittest.skipIf(
-    not os.environ.get("LIVE_TEST"), "Skipping because not running live test"
-)
+@unittest.skipIf(not os.environ.get("LIVE_TEST"), "Skipping because not running live test")
 class TestS3(unittest.TestCase):
     def setUp(self):
@@ -195,7 +193,5 @@ def test_get_buckets_with_subname(self):
 
         buckets_with_subname_true = self.s3.get_buckets_type(self.test_bucket_subname)
         self.assertTrue(self.test_bucket in buckets_with_subname_true)
-        buckets_with_subname_false = self.s3.get_buckets_type(
-            "bucketsubnamedoesnotexist"
-        )
+        buckets_with_subname_false = self.s3.get_buckets_type("bucketsubnamedoesnotexist")
         self.assertFalse(self.test_bucket in buckets_with_subname_false)
diff --git a/test/test_salesforce/test_salesforce.py b/test/test_salesforce/test_salesforce.py
index 18e6e3b6b7..d90cc76129 100644
--- a/test/test_salesforce/test_salesforce.py
+++ b/test/test_salesforce/test_salesforce.py
@@ -19,9 +19,7 @@ def setUp(self):
             {
                 "attributes": {
                     "type": "Contact",
-                    "url": "/services/data/v38.0/"
-                    + "sobjects/Contact/"
-                    + "1234567890AaBbC",
+                    "url": "/services/data/v38.0/" + "sobjects/Contact/" + "1234567890AaBbC",
                 },
                 "Id": "1234567890AaBbC",
             }
diff --git a/test/test_scytl/test_scytl.py b/test/test_scytl/test_scytl.py
index eb0309454c..5544cf78d2 100644
--- a/test/test_scytl/test_scytl.py
+++ b/test/test_scytl/test_scytl.py
@@ -34,9 +34,7 @@ def test_get_summary_results_succeeds(self):
             expectedResultRow["counties_reporting"] = (
                 expectedResultRow["counties_reporting"] or None
             )
-            expectedResultRow["total_counties"] = (
-                expectedResultRow["total_counties"] or None
-            )
+            expectedResultRow["total_counties"] = expectedResultRow["total_counties"] or None
 
             self.assertDictEqual(row, expectedResultRow)
 
@@ -62,12 +60,8 @@ def test_get_detailed_results_succeeds(self):
         for i in range(len(result)):
             expectedResultRow = expectedResult[i]
 
-            expectedResultRow["recorded_votes"] = int(
-                expectedResultRow["recorded_votes"]
-            )
-            expectedResultRow[
-                "timestamp_last_updated"
-            ] = self.scy._parse_date_to_utc(
+            expectedResultRow["recorded_votes"] = int(expectedResultRow["recorded_votes"])
+            expectedResultRow["timestamp_last_updated"] = self.scy._parse_date_to_utc(
                 expectedResultRow["timestamp_last_updated"]
             )
 
@@ -95,12 +89,8 @@ def test_get_detailed_results_for_participating_counties_succeeds(self):
         for i in range(len(result)):
             expectedResultRow = expectedResult[i]
 
-            expectedResultRow["recorded_votes"] = int(
-                expectedResultRow["recorded_votes"]
-            )
-            expectedResultRow[
-                "timestamp_last_updated"
-            ] = self.scy._parse_date_to_utc(
+            expectedResultRow["recorded_votes"] = int(expectedResultRow["recorded_votes"])
+            expectedResultRow["timestamp_last_updated"] = self.scy._parse_date_to_utc(
                 expectedResultRow["timestamp_last_updated"]
            )
 
@@ -111,9 +101,7 @@ def test_get_detailed_results_for_participating_counties_succeeds_for_two_counti
     ):
         counties = ["Barrow", "Clarke"]
 
-        _, result = self.scy.get_detailed_results_for_participating_counties(
-            county_names=counties
-        )
+        _, result = self.scy.get_detailed_results_for_participating_counties(county_names=counties)
 
         with open(f"{_DIR}/114729_precinct_expected.csv", "r") as expected:
             expectedResult = csv.DictReader(expected, delimiter=",")
@@ -125,12 +113,8 @@ def test_get_detailed_results_for_participating_counties_succeeds_for_two_counti
         for i, row in enumerate(result):
             expectedResultRow = filteredExpectedResults[i]
 
-            expectedResultRow["recorded_votes"] = int(
-                expectedResultRow["recorded_votes"]
-            )
-            expectedResultRow[
-                "timestamp_last_updated"
-            ] = self.scy._parse_date_to_utc(
+            expectedResultRow["recorded_votes"] = int(expectedResultRow["recorded_votes"])
+            expectedResultRow["timestamp_last_updated"] = self.scy._parse_date_to_utc(
                 expectedResultRow["timestamp_last_updated"]
             )
 
@@ -141,9 +125,7 @@ def test_get_detailed_results_for_participating_counties_missing_counties_update
     ):
         counties = ["Barrow"]
 
-        _, result = self.scy.get_detailed_results_for_participating_counties(
-            county_names=counties
-        )
+        _, result = self.scy.get_detailed_results_for_participating_counties(county_names=counties)
 
         self.assertNotEqual(result, [])
 
@@ -166,9 +148,7 @@ def test_get_detailed_results_for_participating_counties_skips_if_no_version_upd
 
         self.assertEqual(result, [])
 
-        _, result = self.scy.get_detailed_results_for_participating_counties(
-            force_update=True
-        )
+        _, result = self.scy.get_detailed_results_for_participating_counties(force_update=True)
 
         self.assertNotEqual(result, [])
 
@@ -210,16 +190,12 @@ def _mock_responses(self, m: requests_mock.Mocker):
             state=TEST_STATE, election_id=TEST_ELECTION_ID, version_num=TEST_VERSION_NUM
         )
 
-        with open(
-            f"{_DIR}/GA_114729_296262_county_election_settings.json", "r"
-        ) as details_file:
+        with open(f"{_DIR}/GA_114729_296262_county_election_settings.json", "r") as details_file:
             m.get(mock_election_settings_url, text=details_file.read())
 
         for file in os.listdir(f"{_DIR}/mock_responses"):
             with open(f"{_DIR}/mock_responses/{file}", "rb") as details_file:
-                file_url = f"https://results.enr.clarityelections.com/{file}".replace(
-                    "_", "/"
-                )
+                file_url = f"https://results.enr.clarityelections.com/{file}".replace("_", "/")
                 m.get(file_url, content=details_file.read())
 
         mock_summary_csv_url = scytl.SUMMARY_CSV_ZIP_URL_TEMPLATE.format(
diff --git a/test/test_sendmail.py b/test/test_sendmail.py
index 465b8cc7c0..47a56d6b8a 100644
--- a/test/test_sendmail.py
+++ b/test/test_sendmail.py
@@ -41,31 +41,23 @@ def test_message_contents_set_appropriately(self, dummy_sendmail):
 
 class TestSendMailCreateMessageHtml:
     def test_creates_multipart_message(self, dummy_sendmail):
-        message = dummy_sendmail._create_message_html(
-            "from", "to", "subject", "text", "html"
-        )
+        message = dummy_sendmail._create_message_html("from", "to", "subject", "text", "html")
         assert isinstance(message, MIMEMultipart)
 
     def test_sets_to_from_subject(self, dummy_sendmail):
-        message = dummy_sendmail._create_message_html(
-            "from", "to", "subject", "text", "html"
-        )
+        message = dummy_sendmail._create_message_html("from", "to", "subject", "text", "html")
         assert message.get("from") == "from"
         assert message.get("to") == "to"
         assert message.get("subject") == "subject"
 
     def test_works_if_no_message_text(self, dummy_sendmail):
-        message = dummy_sendmail._create_message_html(
-            "from", "to", "subject", None, "html"
-        )
+        message = dummy_sendmail._create_message_html("from", "to", "subject", None, "html")
         assert len(message.get_payload()) == 1
         assert message.get_payload()[0].get_payload() == "html"
         assert message.get_payload()[0].get_content_type() == "text/html"
 
     def test_works_with_text_and_html(self, dummy_sendmail):
-        message = dummy_sendmail._create_message_html(
-            "from", "to", "subject", "text", "html"
-        )
+        message = dummy_sendmail._create_message_html("from", "to", "subject", "text", "html")
         assert len(message.get_payload()) == 2
         assert message.get_payload()[0].get_payload() == "text"
         assert message.get_payload()[0].get_content_type() == "text/plain"
@@ -75,9 +67,7 @@ def test_works_with_text_and_html(self, dummy_sendmail):
 
 class TestSendMailCreateMessageAttachments:
     def test_creates_multipart_message(self, dummy_sendmail):
-        message = dummy_sendmail._create_message_attachments(
-            "from", "to", "subject", "text", []
-        )
+        message = dummy_sendmail._create_message_attachments("from", "to", "subject", "text", [])
         assert isinstance(message, MIMEMultipart)
 
     def test_can_handle_html(self, dummy_sendmail):
@@ -103,9 +93,7 @@ def test_can_handle_html(self, dummy_sendmail):
             ),  # This will fail if the method is updated to parse video
         ],
     )
-    def test_properly_detects_file_types(
-        self, tmp_path, dummy_sendmail, filename, expected_type
-    ):
+    def test_properly_detects_file_types(self, tmp_path, dummy_sendmail, filename, expected_type):
         filename = tmp_path / filename
         filename.write_bytes(b"Parsons")
         message = dummy_sendmail._create_message_attachments(
@@ -150,15 +138,11 @@ def _send_message(self, message):
         return PatchedSendMail()
 
     def test_errors_when_send_message_not_implemented(self):
-        with pytest.raises(
-            TypeError, match="Can't instantiate abstract class SendMail"
-        ):
+        with pytest.raises(TypeError, match="Can't instantiate abstract class SendMail"):
             SendMail().send_email("from@from.com", "to@to.com", "subject", "text")
 
     def test_can_handle_lists_of_emails(self, patched_sendmail):
-        patched_sendmail.send_email(
-            "from", ["to1@to1.com", "to2@to2.com"], "subject", "text"
-        )
+        patched_sendmail.send_email("from", ["to1@to1.com", "to2@to2.com"], "subject", "text")
         assert patched_sendmail.message.get("to") == "to1@to1.com, to2@to2.com"
 
     def test_errors_if_an_email_in_a_list_doesnt_validate(self, patched_sendmail):
@@ -172,21 +156,13 @@ def test_errors_if_no_to_email_is_specified(self, patched_sendmail):
             patched_sendmail.send_email("from", [], "subject", "text")
 
     def test_appropriately_dispatches_html_email(self, patched_sendmail):
-        patched_sendmail.send_email(
-            "from", "to@to.com", "subject", "text", message_html="html"
-        )
+        patched_sendmail.send_email("from", "to@to.com", "subject", "text", message_html="html")
         assert len(patched_sendmail.message.get_payload()) == 2
-        assert (
-            patched_sendmail.message.get_payload()[1].get_content_type() == "text/html"
-        )
+        assert patched_sendmail.message.get_payload()[1].get_content_type() == "text/html"
 
-    def test_appropriately_handles_filename_specified_as_string(
-        self, tmp_path, patched_sendmail
-    ):
+    def test_appropriately_handles_filename_specified_as_string(self, tmp_path, patched_sendmail):
         filename = tmp_path / "test.txt"
         filename.write_bytes(b"Parsons")
-        patched_sendmail.send_email(
-            "from", "to@to.com", "subject", "text", files=str(filename)
-        )
+        patched_sendmail.send_email("from", "to@to.com", "subject", "text", files=str(filename))
         assert len(patched_sendmail.message.get_payload()) == 2
         assert isinstance(patched_sendmail.message.get_payload()[1], MIMEText)
diff --git a/test/test_sftp.py b/test/test_sftp.py
index 09a01ba0af..3963daeef5 100644
--- a/test/test_sftp.py
+++ b/test/test_sftp.py
@@ -29,8 +29,7 @@
 ]
 
 CSV_PATH, COMPRESSED_CSV_PATH, EMPTY_PATH, SUBDIR_A_PATH, SUBDIR_B_PATH = [
-    f"{REMOTE_DIR}/{content}"
-    for content in (CSV, COMPRESSED_CSV, EMPTY, SUBDIR_A, SUBDIR_B)
+    f"{REMOTE_DIR}/{content}" for content in (CSV, COMPRESSED_CSV, EMPTY, SUBDIR_A, SUBDIR_B)
 ]
 
 CSV_A_PATH, CSV_B_PATH = [
@@ -76,9 +75,7 @@ def live_sftp(simple_csv_path, simple_compressed_csv_path, simple_table): # noq
 # This second live_sftp fixture is used for test_get_files so that files are never downloaded and
 # mocks can be inspected.
@pytest.fixture -def live_sftp_with_mocked_get( - simple_csv_path, simple_compressed_csv_path # noqa: F811 -): +def live_sftp_with_mocked_get(simple_csv_path, simple_compressed_csv_path): # noqa: F811 SFTP_with_mocked_get = deepcopy(SFTP) # The names of temp files are long arbitrary strings. This makes them predictable. @@ -195,9 +192,7 @@ def test_table_to_sftp_csv(live_sftp, simple_table, compression): # noqa F811 remote_path = f"{REMOTE_DIR}/test_to_sftp.csv" if compression == "gzip": remote_path += ".gz" - simple_table.to_sftp_csv( - remote_path, host, username, password, compression=compression - ) + simple_table.to_sftp_csv(remote_path, host, username, password, compression=compression) local_path = live_sftp.get_file(remote_path) assert_file_matches_table(local_path, simple_table) @@ -293,9 +288,7 @@ def test_get_files_calls_get_to_write_to_provided_local_paths( live_sftp_with_mocked_get, ): live_sftp, get = live_sftp_with_mocked_get - results = live_sftp.get_files( - remote=[SUBDIR_A_PATH, SUBDIR_B_PATH], local_paths=local_paths - ) + results = live_sftp.get_files(remote=[SUBDIR_A_PATH, SUBDIR_B_PATH], local_paths=local_paths) assert get.call_count == 2 calls = [call(CSV_A_PATH, local_paths[0]), call(CSV_B_PATH, local_paths[1])] assert_has_calls(get, calls) @@ -304,9 +297,7 @@ def test_get_files_calls_get_to_write_to_provided_local_paths( @mark_live_test @pytest.mark.parametrize("kwargs,expected", args_and_expected["get_files"]) -def test_get_files_calls_get_to_write_temp_files( - kwargs, expected, live_sftp_with_mocked_get -): +def test_get_files_calls_get_to_write_temp_files(kwargs, expected, live_sftp_with_mocked_get): live_sftp, get = live_sftp_with_mocked_get live_sftp.get_files(**kwargs) assert get.call_count == len(expected) diff --git a/test/test_sftp_ssh.py b/test/test_sftp_ssh.py index 298e8ed8a3..60e29accbf 100644 --- a/test/test_sftp_ssh.py +++ b/test/test_sftp_ssh.py @@ -143,9 +143,7 @@ def test_table_to_sftp_csv(live_sftp, simple_table, compression): # noqa: F811 @mark_live_test @pytest.mark.parametrize("compression", [None, "gzip"]) -def test_table_to_sftp_csv_no_password( - live_sftp, simple_table, compression # noqa: F811 -): +def test_table_to_sftp_csv_no_password(live_sftp, simple_table, compression): # noqa: F811 host = os.environ.get("SFTP_HOST") username = os.environ.get("SFTP_USERNAME") rsa_private_key_file = os.environ.get("SFTP_RSA_PRIVATE_KEY_FILE") diff --git a/test/test_shopify.py b/test/test_shopify.py index 87601794d0..780e30ee8f 100644 --- a/test/test_shopify.py +++ b/test/test_shopify.py @@ -101,19 +101,14 @@ def test_get_orders(self, m): json=self.mock_orders_since, ) m.get( - self.shopify.get_query_url(None, None, "orders", False) - + "&financial_status=paid", + self.shopify.get_query_url(None, None, "orders", False) + "&financial_status=paid", json=self.mock_orders_completed, ) - assert_matching_tables( - self.shopify.get_orders(None, None, False), self.mock_result_all - ) + assert_matching_tables(self.shopify.get_orders(None, None, False), self.mock_result_all) assert_matching_tables( self.shopify.get_orders("2020-10-20", None, False), self.mock_result_date ) - assert_matching_tables( - self.shopify.get_orders(None, 2, False), self.mock_result_since - ) + assert_matching_tables(self.shopify.get_orders(None, 2, False), self.mock_result_since) assert_matching_tables( self.shopify.get_orders(None, None, True), self.mock_result_completed ) @@ -145,9 +140,7 @@ def test_get_query_url(self, m): @requests_mock.Mocker() def test_graphql(self, m): m.post( 
- "https://{0}.myshopify.com/admin/api/{1}/graphql.json".format( - SUBDOMAIN, API_VERSION - ), + "https://{0}.myshopify.com/admin/api/{1}/graphql.json".format(SUBDOMAIN, API_VERSION), json=self.mock_graphql, ) self.assertEqual( diff --git a/test/test_sisense/test_sisense.py b/test/test_sisense/test_sisense.py index 0148b375ba..c8ec43990c 100644 --- a/test/test_sisense/test_sisense.py +++ b/test/test_sisense/test_sisense.py @@ -22,9 +22,7 @@ def test_init(self): self.assertEqual(sisense.site_name, "my_site_name") self.assertEqual(sisense.api_key, "my_api_key") self.assertEqual(sisense.api.uri, "https://app.periscopedata.com/api/v1/") - self.assertEqual( - sisense.api.headers["HTTP-X-PARTNER-AUTH"], "my_site_name:my_api_key" - ) + self.assertEqual(sisense.api.headers["HTTP-X-PARTNER-AUTH"], "my_site_name:my_api_key") @requests_mock.Mocker() def test_publish_shared_dashboard(self, m): @@ -39,9 +37,7 @@ def test_publish_shared_dashboard(self, m): @requests_mock.Mocker() def test_list_shared_dashboards(self, m): - m.post( - f"{self.sisense.uri}shared_dashboard/list", json=TEST_LIST_SHARED_DASHBOARDS - ) + m.post(f"{self.sisense.uri}shared_dashboard/list", json=TEST_LIST_SHARED_DASHBOARDS) self.assertEqual( self.sisense.list_shared_dashboards(dashboard_id="1234"), TEST_LIST_SHARED_DASHBOARDS, diff --git a/test/test_slack/test_slack.py b/test/test_slack/test_slack.py index e72669c2d8..d3a9dc498e 100644 --- a/test/test_slack/test_slack.py +++ b/test/test_slack/test_slack.py @@ -289,6 +289,4 @@ def test_file_upload(self, m): json={"ok": False, "error": "invalid_auth"}, ) - self.assertRaises( - SlackClientError, self.slack.upload_file, ["D0L4B9P0Q"], file_path - ) + self.assertRaises(SlackClientError, self.slack.upload_file, ["D0L4B9P0Q"], file_path) diff --git a/test/test_smtp.py b/test/test_smtp.py index 481af7c413..1da36788b6 100644 --- a/test/test_smtp.py +++ b/test/test_smtp.py @@ -55,13 +55,9 @@ def test_send_message_html(self): self.assertTrue(self.quit_ran) def test_send_message_manualclose(self): - smtp = SMTP( - "fake.example.com", username="fake", password="fake", close_manually=True - ) + smtp = SMTP("fake.example.com", username="fake", password="fake", close_manually=True) smtp.conn = FakeConnection(self) - smtp.send_email( - "foo@example.com", "recipient1@example.com", "Simple subject", "Fake body" - ) + smtp.send_email("foo@example.com", "recipient1@example.com", "Simple subject", "Fake body") self.assertFalse(self.quit_ran) def test_send_message_files(self): diff --git a/test/test_targetsmart/test_targetsmart_api.py b/test/test_targetsmart/test_targetsmart_api.py index e8f01975a8..80d8fab5c1 100644 --- a/test/test_targetsmart/test_targetsmart_api.py +++ b/test/test_targetsmart/test_targetsmart_api.py @@ -180,9 +180,7 @@ def test_district_point(self, m): self.assertTrue( validate_list( district_expected, - self.ts.district( - search_type="point", latitude="41.898369", longitude="-87.694382" - ), + self.ts.district(search_type="point", latitude="41.898369", longitude="-87.694382"), ) ) @@ -204,9 +202,7 @@ def test_district_address(self, m): self.assertTrue( validate_list( district_expected, - self.ts.district( - search_type="address", address="908 N Main St, Chicago, IL 60611" - ), + self.ts.district(search_type="address", address="908 N Main St, Chicago, IL 60611"), ) ) diff --git a/test/test_targetsmart/test_targetsmart_automation.py b/test/test_targetsmart/test_targetsmart_automation.py index 124ac06c6d..343eff3bb6 100644 --- a/test/test_targetsmart/test_targetsmart_automation.py 
+++ b/test/test_targetsmart/test_targetsmart_automation.py @@ -39,16 +39,12 @@ def test_create_job_xml(self): def test_config_status(self): # Find good configuration - self.sftp.put_file( - self.test_xml, f"{self.ts.sftp_dir}/{self.job_name}.job.xml.good" - ) + self.sftp.put_file(self.test_xml, f"{self.ts.sftp_dir}/{self.job_name}.job.xml.good") self.assertTrue(self.ts.config_status(self.job_name)) self.ts.remove_files(self.job_name) # Find bad configuration - self.sftp.put_file( - self.test_xml, f"{self.ts.sftp_dir}/{self.job_name}.job.xml.bad" - ) + self.sftp.put_file(self.test_xml, f"{self.ts.sftp_dir}/{self.job_name}.job.xml.bad") self.assertRaises(ValueError, self.ts.config_status, self.job_name) @mark_live_test diff --git a/test/test_targetsmart/test_targetsmart_smartmatch.py b/test/test_targetsmart/test_targetsmart_smartmatch.py index 7a8ef2f3a4..f39c56fd7c 100644 --- a/test/test_targetsmart/test_targetsmart_smartmatch.py +++ b/test/test_targetsmart/test_targetsmart_smartmatch.py @@ -62,9 +62,7 @@ def raw_outgz(raw_outcsv): @pytest.fixture def final_outtable(prep_intable, raw_outtable): - return petl.leftjoin(prep_intable, raw_outtable, key="matchback_id").cutout( - "matchback_id" - ) + return petl.leftjoin(prep_intable, raw_outtable, key="matchback_id").cutout("matchback_id") @pytest.fixture @@ -86,9 +84,7 @@ def test_smartmatch( poll_resp = {"url": "https://mock_smartmatch_download_endpoint", "error": None} requests_mock.get("https://api.targetsmart.com/service/smartmatch", json=resp1) requests_mock.put(resp1["url"]) - requests_mock.get( - "https://api.targetsmart.com/service/smartmatch/poll", json=poll_resp - ) + requests_mock.get("https://api.targetsmart.com/service/smartmatch/poll", json=poll_resp) requests_mock.get(poll_resp["url"], content=raw_outgz) results = ts.smartmatch(intable).to_petl() diff --git a/test/test_twilio/test_twilio.py b/test/test_twilio/test_twilio.py index 748bc00aa6..b4cef46ac9 100644 --- a/test/test_twilio/test_twilio.py +++ b/test/test_twilio/test_twilio.py @@ -35,9 +35,7 @@ def test_get_account_usage(self): # Make sure that it is calling the correct Twilio methods self.twilio.get_account_usage(time_period="today") - assert self.twilio.client.usage.records.today.list.called_with( - time_period="today" - ) + assert self.twilio.client.usage.records.today.list.called_with(time_period="today") self.twilio.get_account_usage(time_period="last_month") assert self.twilio.client.usage.records.last_month.list.called_with( time_period="last_month" @@ -47,20 +45,12 @@ def test_get_account_usage(self): time_period="this_month" ) self.twilio.get_account_usage(time_period="yesterday") - assert self.twilio.client.usage.records.today.list.called_with( - time_period="yesterday" - ) + assert self.twilio.client.usage.records.today.list.called_with(time_period="yesterday") # Make sure that it is calling the correct Twilio methods self.twilio.get_account_usage(time_period="daily", start_date="10-19-2019") - assert self.twilio.client.usage.records.daily.list.called_with( - start_date="10-19-2019" - ) + assert self.twilio.client.usage.records.daily.list.called_with(start_date="10-19-2019") self.twilio.get_account_usage(time_period="monthly", start_date="10-19-2019") - assert self.twilio.client.usage.records.monthly.list.called_with( - start_date="10-19-2019" - ) + assert self.twilio.client.usage.records.monthly.list.called_with(start_date="10-19-2019") self.twilio.get_account_usage(time_period="yearly", start_date="10-19-2019") - assert 
self.twilio.client.usage.records.yearly.list.called_with( - start_date="10-19-2019" - ) + assert self.twilio.client.usage.records.yearly.list.called_with(start_date="10-19-2019") diff --git a/test/test_utilities.py b/test/test_utilities.py index ec4a4eaf2d..359c938ded 100644 --- a/test/test_utilities.py +++ b/test/test_utilities.py @@ -28,9 +28,7 @@ def test_date_to_timestamp(date, exp_ts): def test_parse_date(): # Test parsing an ISO8601 string - expected = datetime.datetime( - year=2020, month=1, day=1, tzinfo=datetime.timezone.utc - ) + expected = datetime.datetime(year=2020, month=1, day=1, tzinfo=datetime.timezone.utc) parsed = parse_date("2020-01-01T00:00:00.000 UTC") assert parsed == expected, parsed diff --git a/test/test_utilities/test_format_phone_number.py b/test/test_utilities/test_format_phone_number.py index 5a9a9f1143..aeafb8ad7c 100644 --- a/test/test_utilities/test_format_phone_number.py +++ b/test/test_utilities/test_format_phone_number.py @@ -21,9 +21,7 @@ def test_format_phone_number_us_number_with_leading_1(self): def test_format_phone_number_international_number(self): phone_number = "+441234567890" expected_result = "+441234567890" - self.assertEqual( - format_phone_number(phone_number, country_code="44"), expected_result - ) + self.assertEqual(format_phone_number(phone_number, country_code="44"), expected_result) def test_format_phone_number_invalid_length(self): phone_number = "12345" diff --git a/test/test_van/test_activist_codes.py b/test/test_van/test_activist_codes.py index 02bfeecb64..df3a635935 100644 --- a/test/test_van/test_activist_codes.py +++ b/test/test_van/test_activist_codes.py @@ -79,31 +79,23 @@ def test_get_activist_code(self, m): def test_toggle_activist_code(self, m): # Test apply activist code - m.post( - self.van.connection.uri + "people/2335282/canvassResponses", status_code=204 - ) + m.post(self.van.connection.uri + "people/2335282/canvassResponses", status_code=204) self.assertTrue(self.van.toggle_activist_code(2335282, 4429154, "apply"), 204) # Test remove activist code - m.post( - self.van.connection.uri + "people/2335282/canvassResponses", status_code=204 - ) + m.post(self.van.connection.uri + "people/2335282/canvassResponses", status_code=204) self.assertTrue(self.van.toggle_activist_code(2335282, 4429154, "remove"), 204) @requests_mock.Mocker() def test_apply_activist_code(self, m): # Test apply activist code - m.post( - self.van.connection.uri + "people/2335282/canvassResponses", status_code=204 - ) + m.post(self.van.connection.uri + "people/2335282/canvassResponses", status_code=204) self.assertEqual(self.van.apply_activist_code(2335282, 4429154), 204) @requests_mock.Mocker() def test_remove_activist_code(self, m): # Test remove activist code - m.post( - self.van.connection.uri + "people/2335282/canvassResponses", status_code=204 - ) + m.post(self.van.connection.uri + "people/2335282/canvassResponses", status_code=204) self.assertEqual(self.van.remove_activist_code(2335282, 4429154), 204) diff --git a/test/test_van/test_bulkimport.py b/test/test_van/test_bulkimport.py index 8c0d738f10..f667d2e990 100644 --- a/test/test_van/test_bulkimport.py +++ b/test/test_van/test_bulkimport.py @@ -69,9 +69,7 @@ def test_get_bulk_import_mapping_types(self, m): m.get(self.van.connection.uri + "bulkImportMappingTypes", json=[mapping_type]) - assert_matching_tables( - self.van.get_bulk_import_mapping_types(), Table([mapping_type]) - ) + assert_matching_tables(self.van.get_bulk_import_mapping_types(), Table([mapping_type])) @requests_mock.Mocker() def 
test_get_bulk_import_mapping_type(self, m): @@ -81,9 +79,7 @@ def test_get_bulk_import_mapping_type(self, m): json=mapping_type, ) - self.assertEqual( - self.van.get_bulk_import_mapping_type("ActivistCode"), mapping_type - ) + self.assertEqual(self.van.get_bulk_import_mapping_type("ActivistCode"), mapping_type) @requests_mock.Mocker() def get_bulk_import_mapping_type_fields(self, m): @@ -98,9 +94,7 @@ def get_bulk_import_mapping_type_fields(self, m): + "bulkImportMappingTypes/Email/EmailSubscriptionStatusId/values" ) - r = self.van.get_bulk_import_mapping_type_fields( - "Email", "EmailSubscriptionStatusId" - ) + r = self.van.get_bulk_import_mapping_type_fields("Email", "EmailSubscriptionStatusId") self.assertEqual(json, r) @requests_mock.Mocker() @@ -136,9 +130,7 @@ def test_bulk_apply_activist_codes(self, m): m.post(self.van.connection.uri + "bulkImportJobs", json={"jobId": 54679}) - job_id = self.van.bulk_apply_activist_codes( - tbl, url_type="S3", bucket="my-bucket" - ) + job_id = self.van.bulk_apply_activist_codes(tbl, url_type="S3", bucket="my-bucket") self.assertEqual(job_id, 54679) @@ -153,9 +145,7 @@ def test_bulk_apply_suppressions(self, m): m.post(self.van.connection.uri + "bulkImportJobs", json={"jobId": 54679}) - job_id = self.van.bulk_apply_suppressions( - tbl, url_type="S3", bucket="my-bucket" - ) + job_id = self.van.bulk_apply_suppressions(tbl, url_type="S3", bucket="my-bucket") self.assertEqual(job_id, 54679) @@ -206,9 +196,7 @@ def test_bulk_upsert_contacts(self, m): "isRequired": False, "canBeMappedToColumn": True, "canBeMappedByName": True, - "parents": [ - {"parentFieldName": "CanvassedBy", "limitedToParentValues": None} - ], + "parents": [{"parentFieldName": "CanvassedBy", "limitedToParentValues": None}], }, { "name": "ContactTypeID", @@ -217,9 +205,7 @@ def test_bulk_upsert_contacts(self, m): "isRequired": False, "canBeMappedToColumn": True, "canBeMappedByName": True, - "parents": [ - {"parentFieldName": "CanvassedBy", "limitedToParentValues": None} - ], + "parents": [{"parentFieldName": "CanvassedBy", "limitedToParentValues": None}], }, ], } diff --git a/test/test_van/test_changed_entities.py b/test/test_van/test_changed_entities.py index 8eb92be3bd..b9afddb412 100644 --- a/test/test_van/test_changed_entities.py +++ b/test/test_van/test_changed_entities.py @@ -86,9 +86,7 @@ def test_get_changed_entities(self, m): tbl = Table([{"a": 1, "b": 2}]) m.post(self.van.connection.uri + "changedEntityExportJobs", json=json) - m.get( - self.van.connection.uri + "changedEntityExportJobs/2170181229", json=json2 - ) + m.get(self.van.connection.uri + "changedEntityExportJobs/2170181229", json=json2) Table.from_csv = mock.MagicMock() Table.from_csv.return_value = tbl diff --git a/test/test_van/test_codes.py b/test/test_van/test_codes.py index a875e9c71b..7630a97111 100644 --- a/test/test_van/test_codes.py +++ b/test/test_van/test_codes.py @@ -78,9 +78,7 @@ def test_create_code(self, m): # Test that it doesn't throw and error r = self.van.create_code( "Test Code", - supported_entities=[ - {"name": "Events", "is_searchable": True, "is_applicable": True} - ], + supported_entities=[{"name": "Events", "is_searchable": True, "is_applicable": True}], ) self.assertEqual(r, 1004960) diff --git a/test/test_van/test_locations.py b/test/test_van/test_locations.py index 6b904b643f..7e6ebc6a1e 100644 --- a/test/test_van/test_locations.py +++ b/test/test_van/test_locations.py @@ -60,9 +60,7 @@ class TestLocations(unittest.TestCase): def setUp(self): - self.van = VAN( - os.environ["VAN_API_KEY"], 
db="EveryAction", raise_for_status=False - ) + self.van = VAN(os.environ["VAN_API_KEY"], db="EveryAction", raise_for_status=False) def tearDown(self): diff --git a/test/test_van/test_ngpvan.py b/test/test_van/test_ngpvan.py index 0f30d03725..038a95ebb8 100644 --- a/test/test_van/test_ngpvan.py +++ b/test/test_van/test_ngpvan.py @@ -22,18 +22,14 @@ def test_get_canvass_responses_contact_types(self, m): m.get(self.van.connection.uri + "canvassResponses/contactTypes", json=json) - assert_matching_tables( - Table(json), self.van.get_canvass_responses_contact_types() - ) + assert_matching_tables(Table(json), self.van.get_canvass_responses_contact_types()) @requests_mock.Mocker() def test_get_canvass_responses_input_types(self, m): json = [{"inputTypeId": 11, "name": "API"}] m.get(self.van.connection.uri + "canvassResponses/inputTypes", json=json) - assert_matching_tables( - Table(json), self.van.get_canvass_responses_input_types() - ) + assert_matching_tables(Table(json), self.van.get_canvass_responses_input_types()) @requests_mock.Mocker() def test_get_canvass_responses_result_codes(self, m): @@ -48,9 +44,7 @@ def test_get_canvass_responses_result_codes(self, m): ] m.get(self.van.connection.uri + "canvassResponses/resultCodes", json=json) - assert_matching_tables( - Table(json), self.van.get_canvass_responses_result_codes() - ) + assert_matching_tables(Table(json), self.van.get_canvass_responses_result_codes()) @requests_mock.Mocker() def test_get_survey_questions(self, m): @@ -143,9 +137,7 @@ def test_delete_supporter_group(self, m): # bad_vanid = 99999 bad_ep = f"supporterGroups/{bad_supporter_group_id}" m.delete(self.van.connection.uri + bad_ep, status_code=404) - self.assertRaises( - HTTPError, self.van.delete_supporter_group, bad_supporter_group_id - ) + self.assertRaises(HTTPError, self.van.delete_supporter_group, bad_supporter_group_id) @requests_mock.Mocker() def test_add_person_supporter_group(self, m): diff --git a/test/test_van/test_people.py b/test/test_van/test_people.py index eebcf77205..82dfbdc002 100644 --- a/test/test_van/test_people.py +++ b/test/test_van/test_people.py @@ -25,9 +25,7 @@ def test_find_person(self, m): status_code=200, ) - person = self.van.find_person( - first_name="Bob", last_name="Smith", phone=4142020792 - ) + person = self.van.find_person(first_name="Bob", last_name="Smith", phone=4142020792) self.assertEqual(person, find_people_response) @@ -106,9 +104,7 @@ def test_valid_search(self): ) # Successful with FN/LN/Email - self.van._valid_search( - "Barack", "Obama", "barack@email.com", None, None, None, None - ) + self.van._valid_search("Barack", "Obama", "barack@email.com", None, None, None, None) # Successful with FN/LN/DOB/ZIP self.van._valid_search( @@ -143,9 +139,7 @@ def test_delete_person(self, m): @requests_mock.Mocker() def test_apply_canvass_result(self, m): # Test a valid attempt - m.post( - self.van.connection.uri + "people/2335282/canvassResponses", status_code=204 - ) + m.post(self.van.connection.uri + "people/2335282/canvassResponses", status_code=204) self.van.apply_canvass_result(2335282, 18) # Test a bad result code @@ -190,24 +184,16 @@ def test_apply_canvass_result(self, m): self.van.apply_canvass_result(2335282, 18, id_type="DWID") # test canvassing via phone or sms without providing phone number - self.assertRaises( - Exception, self.van.apply_canvass_result, 2335282, 18, contact_type_id=37 - ) + self.assertRaises(Exception, self.van.apply_canvass_result, 2335282, 18, contact_type_id=37) # test canvassing via phone or sms with 
providing phone number - m.post( - self.van.connection.uri + "people/2335282/canvassResponses", status_code=204 - ) - self.van.apply_canvass_result( - 2335282, 18, contact_type_id=37, phone="(516)-555-2342" - ) + m.post(self.van.connection.uri + "people/2335282/canvassResponses", status_code=204) + self.van.apply_canvass_result(2335282, 18, contact_type_id=37, phone="(516)-555-2342") @requests_mock.Mocker() def test_apply_survey_question(self, m): # Test valid survey question - m.post( - self.van.connection.uri + "people/2335282/canvassResponses", status_code=204 - ) + m.post(self.van.connection.uri + "people/2335282/canvassResponses", status_code=204) self.van.apply_survey_response(2335282, 351006, 1443891) # Test bad survey response id @@ -219,12 +205,8 @@ def test_apply_survey_question(self, m): # 'properties': ['responses[0].surveyResponseId'] # }] # } - m.post( - self.van.connection.uri + "people/2335282/canvassResponses", status_code=400 - ) - self.assertRaises( - HTTPError, self.van.apply_survey_response, 2335282, 0, 1443891 - ) + m.post(self.van.connection.uri + "people/2335282/canvassResponses", status_code=400) + self.assertRaises(HTTPError, self.van.apply_survey_response, 2335282, 0, 1443891) # Test bad survey question id # json = { @@ -235,9 +217,7 @@ def test_apply_survey_question(self, m): # 'properties': ['responses[0].surveyQuestionId'] # }] # } - m.post( - self.van.connection.uri + "people/2335282/canvassResponses", status_code=400 - ) + m.post(self.van.connection.uri + "people/2335282/canvassResponses", status_code=400) self.assertRaises(HTTPError, self.van.apply_survey_response, 2335282, 351006, 0) def test_toggle_volunteer_action(self): diff --git a/test/test_van/test_scores.py b/test/test_van/test_scores.py index 63761946bc..927359367b 100644 --- a/test/test_van/test_scores.py +++ b/test/test_van/test_scores.py @@ -196,9 +196,7 @@ def test_update_score_status(self, m): ) # Test bad input - self.assertRaises( - ValueError, self.van.update_score_status, score_update_id, "not a thing." 
- ) + self.assertRaises(ValueError, self.van.update_score_status, score_update_id, "not a thing.") # Test good input self.assertTrue(self.van.update_score_status(score_update_id, "approved")) @@ -214,9 +212,7 @@ def test_upload_scores(self, m): tbl = Table([["vanid", "col"], ["1", ".5"]]) json = {"jobId": 9749} m.post(self.van.connection.uri + "FileLoadingJobs", json=json, status_code=201) - self.van.upload_scores( - tbl, [{"score_id": 9999, "score_column": "col"}], url_type="S3" - ) + self.van.upload_scores(tbl, [{"score_id": 9999, "score_column": "col"}], url_type="S3") @requests_mock.Mocker() def test_create_file_load(self, m): diff --git a/test/test_van/test_signups.py b/test/test_van/test_signups.py index 27d9bb8c5b..8e47231d96 100644 --- a/test/test_van/test_signups.py +++ b/test/test_van/test_signups.py @@ -83,9 +83,7 @@ class TestSignups(unittest.TestCase): def setUp(self): - self.van = VAN( - os.environ["VAN_API_KEY"], db="EveryAction", raise_for_status=False - ) + self.van = VAN(os.environ["VAN_API_KEY"], db="EveryAction", raise_for_status=False) def tearDown(self): @@ -98,9 +96,7 @@ def test_get_signup_statuses(self, m): # Test events lookup self.assertTrue( - validate_list( - ["statusId", "name"], self.van.get_signups_statuses(event_id=750000849) - ) + validate_list(["statusId", "name"], self.van.get_signups_statuses(event_id=750000849)) ) # Test event type lookup @@ -119,9 +115,7 @@ def test_get_signups(self, m): m.get(self.van.connection.uri + "signups", json=json) self.assertTrue( - validate_list( - signup_expected, self.van.get_event_signups(event_id=750001004) - ) + validate_list(signup_expected, self.van.get_event_signups(event_id=750001004)) ) self.assertTrue( @@ -133,9 +127,7 @@ def test_get_signup(self, m): event_signup_id = 14285 - m.get( - self.van.connection.uri + f"signups/{event_signup_id}".format(), json=signup - ) + m.get(self.van.connection.uri + f"signups/{event_signup_id}".format(), json=signup) self.assertEqual(signup, self.van.get_signup(event_signup_id)) @@ -144,9 +136,7 @@ def test_create_signup(self, m): m.post(self.van.connection.uri + "signups", json=14285, status_code=201) - self.assertEqual( - self.van.create_signup(100349920, 750001004, 19076, 263920, 11, 3), 14285 - ) + self.assertEqual(self.van.create_signup(100349920, 750001004, 19076, 263920, 11, 3), 14285) @requests_mock.Mocker() def test_update_signup(self, m): diff --git a/test/test_van/test_targets.py b/test/test_van/test_targets.py index be4f1dc973..d041bef680 100644 --- a/test/test_van/test_targets.py +++ b/test/test_van/test_targets.py @@ -163,9 +163,5 @@ def test_get_target_export(self, fromcsv, m): ] ) - assert_matching_tables( - self.van.get_target_export(export_job_id), expected_result - ) - self.assertEqual( - fromcsv.call_args, unittest.mock.call(download_url, encoding="utf-8-sig") - ) + assert_matching_tables(self.van.get_target_export(export_job_id), expected_result) + self.assertEqual(fromcsv.call_args, unittest.mock.call(download_url, encoding="utf-8-sig")) diff --git a/useful_resources/sample_code/actblue_to_google_sheets.py b/useful_resources/sample_code/actblue_to_google_sheets.py index da079678ce..b1520b568a 100644 --- a/useful_resources/sample_code/actblue_to_google_sheets.py +++ b/useful_resources/sample_code/actblue_to_google_sheets.py @@ -55,17 +55,11 @@ # your email address. Once the sheet has been created you may add user permissions in Google Sheets. 
editor_email = "" if not editor_email: - raise ValueError( - "editor_email is required to enable access to the new Google Sheet" - ) + raise ValueError("editor_email is required to enable access to the new Google Sheet") # Step 2: Specify what contribution data you want from ActBlue -date_range_start = ( - "2022-01-01" # Start of date range to withdraw contribution data (inclusive). -) -date_range_end = ( - "2022-02-01" # End of date range to withdraw contribution data (exclusive). -) +date_range_start = "2022-01-01" # Start of date range to withdraw contribution data (inclusive). +date_range_end = "2022-02-01" # End of date range to withdraw contribution data (exclusive). csv_type = "paid_contributions" # csv_type options: # 'paid_contributions': @@ -79,9 +73,7 @@ # form. # Step 3: Retrieve data from ActBlue and hold it in a Parsons Table. -contribution_data = actblue.get_contributions( - csv_type, date_range_start, date_range_end -) +contribution_data = actblue.get_contributions(csv_type, date_range_start, date_range_end) # Step 4: Create a spreadsheet on Google Sheets sheet_id = google_sheets.create_spreadsheet(spreadsheet_name, editor_email=editor_email) diff --git a/useful_resources/sample_code/apply_activist_code.py b/useful_resources/sample_code/apply_activist_code.py index 1ced1353b3..b11a78c536 100644 --- a/useful_resources/sample_code/apply_activist_code.py +++ b/useful_resources/sample_code/apply_activist_code.py @@ -40,12 +40,8 @@ # Create dictionary of VAN states and API keys from multiline Civis credential -myv_states = { - x.split(",")[0]: x.split(",")[1] for x in os.environ["VAN_PASSWORD"].split("\r\n") -} -myv_keys = { - k: VAN(api_key=v, db=os.environ["VAN_DB_NAME"]) for k, v in myv_states.items() -} +myv_states = {x.split(",")[0]: x.split(",")[1] for x in os.environ["VAN_PASSWORD"].split("\r\n")} +myv_keys = {k: VAN(api_key=v, db=os.environ["VAN_DB_NAME"]) for k, v in myv_states.items()} # Create simple set of states for insertion into SQL states = "','".join([s for s in myv_keys]) @@ -72,6 +68,4 @@ for vanid in state_set: # TODO: row undefined, select row form record? row = None - key.toggle_activist_code( - row["vb_smartvan_id"], row["activist_code_id"], "apply" - ) + key.toggle_activist_code(row["vb_smartvan_id"], row["activist_code_id"], "apply") diff --git a/useful_resources/sample_code/civis_job_status_slack_alert.py b/useful_resources/sample_code/civis_job_status_slack_alert.py index 6c0885a49e..d7676b8777 100644 --- a/useful_resources/sample_code/civis_job_status_slack_alert.py +++ b/useful_resources/sample_code/civis_job_status_slack_alert.py @@ -17,9 +17,7 @@ # Configuration variables SLACK_CHANNEL = "" # Slack channel where the alert will post. -CIVIS_PROJECT = ( - "" # ID of the Civis project with jobs and workflows you want to see the status of. -) +CIVIS_PROJECT = "" # ID of the Civis project with jobs and workflows you want to see the status of. 
logger = logging.getLogger(__name__) _handler = logging.StreamHandler() @@ -84,9 +82,7 @@ def get_last_success(object_id, object_type): last_success = "-" if object_type == "workflow": - workflow_executions = client.workflows.list_executions( - object_id, order="updated_at" - ) + workflow_executions = client.workflows.list_executions(object_id, order="updated_at") for execution in workflow_executions: if execution["state"] != "succeeded": continue @@ -116,13 +112,9 @@ def main(): project_name = client.projects.get(CIVIS_PROJECT)["name"] - scripts_table = get_workflows_and_jobs(CIVIS_PROJECT).sort( - columns=["state", "name"] - ) + scripts_table = get_workflows_and_jobs(CIVIS_PROJECT).sort(columns=["state", "name"]) - logger.info( - f"Found {scripts_table.num_rows} jobs and workflows in {project_name} project." - ) + logger.info(f"Found {scripts_table.num_rows} jobs and workflows in {project_name} project.") # This is a list of strings we will build with each job's status output_lines = [] diff --git a/useful_resources/sample_code/mysql_to_googlesheets.py b/useful_resources/sample_code/mysql_to_googlesheets.py index fc4312de02..bcdb324c84 100644 --- a/useful_resources/sample_code/mysql_to_googlesheets.py +++ b/useful_resources/sample_code/mysql_to_googlesheets.py @@ -43,9 +43,7 @@ def try_overwrite(table, request_count, sheet_id, tab_index): try: - gsheets.overwrite_sheet( - sheet_id, table, worksheet=tab_index, user_entered_value=False - ) + gsheets.overwrite_sheet(sheet_id, table, worksheet=tab_index, user_entered_value=False) except APIError as e: print(f"trying to overwrite {tab_index} for the {request_count}th time") @@ -60,31 +58,23 @@ def main(): logger.info(f"Creating Google Sheets workbook called '{TITLE}'") try: - new_sheet = gsheets.create_spreadsheet( - title=TITLE, editor_email=None, folder_id=FOLDER_ID - ) + new_sheet = gsheets.create_spreadsheet(title=TITLE, editor_email=None, folder_id=FOLDER_ID) # If successful new_sheet will be the spreadsheet's ID in a string if isinstance(new_sheet, str): logger.info(f"Successfully created sheet {TITLE}!") # If we do not get a string back from the create_spreadsheet call # then something went wrong. Print the response. else: - logger.info( - f"create_spreadsheet did not return a sheet ID. Issue: {str(new_sheet)}" - ) + logger.info(f"create_spreadsheet did not return a sheet ID. Issue: {str(new_sheet)}") # If we get an error when trying to create the spreadsheet we print the error. except Exception as e: - logger.info( - f"There was a problem creating the Google Sheets workbook! Error: {str(e)}" - ) + logger.info(f"There was a problem creating the Google Sheets workbook! Error: {str(e)}") logger.info("Querying MYSQL database...") query_results = mysql.query(QUERY) - logger.info( - f"Querying complete. Preparing to load data into Google Sheets tab {TAB_LABEL}" - ) + logger.info(f"Querying complete. Preparing to load data into Google Sheets tab {TAB_LABEL}") query_results.convert_columns_to_str() request_count = 0 tab_index = gsheets.add_sheet(new_sheet, title=TAB_LABEL) diff --git a/useful_resources/sample_code/opt_outs_everyaction.py b/useful_resources/sample_code/opt_outs_everyaction.py index e6061e9e3a..70748bd446 100644 --- a/useful_resources/sample_code/opt_outs_everyaction.py +++ b/useful_resources/sample_code/opt_outs_everyaction.py @@ -165,13 +165,9 @@ def main(): # Here we narrow the all_opt_outs table to only the rows that correspond # to this committee. 
-        opt_outs = all_opt_outs.select_rows(
-            lambda row: str(row.committeeid) == committeeid
-        )
+        opt_outs = all_opt_outs.select_rows(lambda row: str(row.committeeid) == committeeid)
 
-        logger.info(
-            f"Found {opt_outs.num_rows} phones to opt out in {committee_name} committee..."
-        )
+        logger.info(f"Found {opt_outs.num_rows} phones to opt out in {committee_name} committee...")
 
         # Now we actually update the records
 
@@ -195,9 +191,7 @@ def main():
     if len(success_log) > 0:
         success_parsonstable = Table(success_log)
         logger.info("Copying success data into log table...")
-        rs.copy(
-            success_parsonstable, SUCCESS_TABLE, if_exists="append", alter_table=True
-        )
+        rs.copy(success_parsonstable, SUCCESS_TABLE, if_exists="append", alter_table=True)
         logger.info("Success log complete.")
 
     if len(error_log) > 0:
diff --git a/useful_resources/sample_code/s3_to_s3.py b/useful_resources/sample_code/s3_to_s3.py
index 10509d59db..bb4e546cf1 100644
--- a/useful_resources/sample_code/s3_to_s3.py
+++ b/useful_resources/sample_code/s3_to_s3.py
@@ -33,9 +33,7 @@
     if value.strip() != "":
         os.environ[name] = value
 
-s3_source = S3(
-    os.environ["AWS_SOURCE_ACCESS_KEY_ID"], os.environ["AWS_SOURCE_SECRET_ACCESS_KEY"]
-)
+s3_source = S3(os.environ["AWS_SOURCE_ACCESS_KEY_ID"], os.environ["AWS_SOURCE_SECRET_ACCESS_KEY"])
 s3_destination = S3(
     os.environ["AWS_DESTINATION_ACCESS_KEY_ID"],
     os.environ["AWS_DESTINATION_SECRET_ACCESS_KEY"],
diff --git a/useful_resources/sample_code/update_user_in_actionkit.py b/useful_resources/sample_code/update_user_in_actionkit.py
index 95f546025c..2ea2b3d068 100644
--- a/useful_resources/sample_code/update_user_in_actionkit.py
+++ b/useful_resources/sample_code/update_user_in_actionkit.py
@@ -45,9 +45,7 @@
 
 loaded = [["id", "voterbase_id", "date_updated"]]  # column names for log table
 
-source_table = (
-    "schema.table"  # this is the table with the information I'm pushing to ActionKit
-)
+source_table = "schema.table"  # this is the table with the information I'm pushing to ActionKit
 
 # this is where we will log every user id that gets marked with a voterbase_id
 log_table = "schema.table"
diff --git a/useful_resources/sample_code/zoom_to_van.py b/useful_resources/sample_code/zoom_to_van.py
index a1bad645ab..b99ae0dfac 100644
--- a/useful_resources/sample_code/zoom_to_van.py
+++ b/useful_resources/sample_code/zoom_to_van.py
@@ -17,13 +17,9 @@
 }
 
 VAN_DB = "MyCampaign"  # one of: MyMembers, EveryAction, MyCampaign (not MyVoters)
-ACTIVIST_CODE_NAME = (
-    ""  # name of VAN activist code, which must be created manually in VAN
-)
+ACTIVIST_CODE_NAME = ""  # name of VAN activist code, which must be created manually in VAN
 ZOOM_MEETING_ID = ""
-MINIMUM_DURATION = (
-    0  # filters out Zoom participants who stayed for less than minimum duration
-)
+MINIMUM_DURATION = 0  # filters out Zoom participants who stayed for less than minimum duration
 
 # ### CODE
 
@@ -44,9 +40,7 @@
 
 # Gets participants from Zoom meeting
 participants = zoom.get_past_meeting_participants(ZOOM_MEETING_ID)
-filtered_participants = participants.select_rows(
-    lambda row: row.duration > MINIMUM_DURATION
-)
+filtered_participants = participants.select_rows(lambda row: row.duration > MINIMUM_DURATION)
 
 # Coalesce the columns into something VAN expects
 column_map = {
@@ -72,9 +66,7 @@
     # generates list of parameters from matched columns, only inlcudes if row has data for column
     params = {col: participant[col] for col in column_map.keys() if participant[col]}
 
-    van_person = van.upsert_person(
-        **params
-    )  # updates if it finds a match, or inserts new user
+    van_person = van.upsert_person(**params)  # updates if it finds a match, or inserts new user
 
     if activist_code_id:
         van.apply_activist_code(