From f604fc89e417596df882fefdff76d3c41c67de12 Mon Sep 17 00:00:00 2001 From: Tom Aldcroft Date: Mon, 13 Nov 2023 09:04:35 -0500 Subject: [PATCH 1/4] Update configuration for ruff --- .github/workflows/python-formatting.yml | 10 ++ .github/workflows/python-linting.yml | 9 +- .pre-commit-config.yaml | 20 ++-- pyproject.toml | 136 ------------------------ ruff.toml | 71 +++++++++++++ 5 files changed, 92 insertions(+), 154 deletions(-) create mode 100644 .github/workflows/python-formatting.yml delete mode 100644 pyproject.toml create mode 100644 ruff.toml diff --git a/.github/workflows/python-formatting.yml b/.github/workflows/python-formatting.yml new file mode 100644 index 0000000..4dc79f7 --- /dev/null +++ b/.github/workflows/python-formatting.yml @@ -0,0 +1,10 @@ +name: check format using ruff +on: [push] +jobs: + ruff: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: chartboost/ruff-action@v1 + with: + args: format --check diff --git a/.github/workflows/python-linting.yml b/.github/workflows/python-linting.yml index 2c5fcca..5b842f0 100644 --- a/.github/workflows/python-linting.yml +++ b/.github/workflows/python-linting.yml @@ -1,11 +1,8 @@ -name: Check Python formatting using Black and Ruff - +name: lint code using ruff on: [push] - jobs: - lint: + ruff: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 - - uses: psf/black@stable + - uses: actions/checkout@v4 - uses: chartboost/ruff-action@v1 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index dec0523..f055986 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,13 +1,9 @@ repos: -- repo: https://github.com/psf/black - rev: 23.7.0 - hooks: - - id: black - language_version: "python3.10" - -- repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.280 - hooks: - - id: ruff - language_version: "python3.10" - args: [--exit-non-zero-on-fix] +- repo: https://github.com/astral-sh/ruff-pre-commit + # Ruff version. + rev: v0.1.5 + hooks: + # Run the linter. + - id: ruff + # Run the formatter. 
+ - id: ruff-format diff --git a/pyproject.toml b/pyproject.toml deleted file mode 100644 index fe58854..0000000 --- a/pyproject.toml +++ /dev/null @@ -1,136 +0,0 @@ -[tool.black] -include = '\.pyi?$' -exclude = ''' -/( - \.git - | \.mypy_cache - | \.tox - | \.venv - | \.vscode - | \.eggs - | _build - | buck-out - | build - | dist - | docs -)/ -''' - -# Copy ruff settings from pandas -[tool.ruff] -line-length = 100 -target-version = "py310" -# fix = true -unfixable = [] - -select = [ - # isort - "I", - # pyflakes - "F", - # pycodestyle - "E", "W", - # flake8-2020 - "YTT", - # flake8-bugbear - "B", - # flake8-quotes - "Q", - # flake8-debugger - "T10", - # flake8-gettext - "INT", - # pylint - "PLC", "PLE", "PLR", "PLW", - # misc lints - "PIE", - # flake8-pyi - "PYI", - # tidy imports - "TID", - # implicit string concatenation - "ISC", - # type-checking imports - "TCH", - # comprehensions - "C4", - # pygrep-hooks - "PGH" -] - -ignore = [ - # space before : (needed for how black formats slicing) - # "E203", # not yet implemented - # module level import not at top of file - "E402", - # do not assign a lambda expression, use a def - "E731", - # line break before binary operator - # "W503", # not yet implemented - # line break after binary operator - # "W504", # not yet implemented - # controversial - "B006", - # controversial?: Loop control variable not used within loop body - # "B007", - # controversial - "B008", - # setattr is used to side-step mypy - "B009", - # getattr is used to side-step mypy - "B010", - # tests use assert False - "B011", - # tests use comparisons but not their returned value - "B015", - # false positives - "B019", - # Loop control variable overrides iterable it iterates - "B020", - # Function definition does not bind loop variable - "B023", - # No explicit `stacklevel` keyword argument found - "B028", - # Functions defined inside a loop must not use variables redefined in the loop - # "B301", # not yet implemented - # Only works with python >=3.10 - "B905", - # Too many arguments to function call - "PLR0913", - # Too many returns - "PLR0911", - # Too many branches - "PLR0912", - # Too many statements - "PLR0915", - # Redefined loop name - "PLW2901", - # Global statements are discouraged - "PLW0603", - # Docstrings should not be included in stubs - "PYI021", - # No builtin `eval()` allowed - "PGH001", - # compare-to-empty-string - "PLC1901", - # Use typing_extensions.TypeAlias for type aliases - # "PYI026", # not yet implemented - # Use "collections.abc.*" instead of "typing.*" (PEP 585 syntax) - # "PYI027", # not yet implemented - # while int | float can be shortened to float, the former is more explicit - # "PYI041", # not yet implemented - - # Additional checks that don't pass yet - # Useless statement - "B018", - # Within an except clause, raise exceptions with ... 
- "B904", - # Magic number - "PLR2004", - # Consider `elif` instead of `else` then `if` to remove indentation level - "PLR5501", -] - -exclude = [ - "docs/", -] diff --git a/ruff.toml b/ruff.toml new file mode 100644 index 0000000..62bd58b --- /dev/null +++ b/ruff.toml @@ -0,0 +1,71 @@ +# Copied originally from pandas +target-version = "py310" + +# fix = true +unfixable = [] + +select = [ + "I", # isort + "F", # pyflakes + "E", "W", # pycodestyle + "YTT", # flake8-2020 + "B", # flake8-bugbear + "Q", # flake8-quotes + "T10", # flake8-debugger + "INT", # flake8-gettext + "PLC", "PLE", "PLR", "PLW", # pylint + "PIE", # misc lints + "PYI", # flake8-pyi + "TID", # tidy imports + "ISC", # implicit string concatenation + "TCH", # type-checking imports + "C4", # comprehensions + "PGH" # pygrep-hooks +] + +# Some additional rules that are useful +extend-select = [ +"UP009", # UTF-8 encoding declaration is unnecessary +"SIM118", # Use `key in dict` instead of `key in dict.keys()` +"D205", # One blank line required between summary line and description +"ARG001", # Unused function argument +"RSE102", # Unnecessary parentheses on raised exception +"PERF401", # Use a list comprehension to create a transformed list +] + +ignore = [ + "ISC001", # Disable this for compatibility with ruff format + "B028", # No explicit `stacklevel` keyword argument found + "B905", # `zip()` without an explicit `strict=` parameter + "E402", # module level import not at top of file + "E731", # do not assign a lambda expression, use a def + "PLC1901", # compare-to-empty-string + "PLR0911", # Too many returns + "PLR0912", # Too many branches + "PLR0913", # Too many arguments to function call + "PLR0915", # Too many statements + "PLR2004", # Magic number +] + +# TODO : fix these and stop ignoring. Commented out ones are common and OK to except. +extend-ignore = [ + "PGH004", # Use specific rule codes when using `noqa` +# "C401", # Unnecessary generator (rewrite as a `set` comprehension) +# "C402", # Unnecessary generator (rewrite as a dict comprehension) +# "C405", # Unnecessary `list` literal (rewrite as a `set` literal) +# "C408", # Unnecessary `dict` call (rewrite as a literal) +# "C416", # Unnecessary `dict` comprehension (rewrite using `dict()`) +# "PGH002", # warn is deprecated in favor of warning +# "PYI056", # Calling `.append()` on `__all__` may not be supported by all type checkers +] + +extend-exclude = [ + "docs", +] + +[pycodestyle] +max-line-length = 100 # E501 reports lines that exceed the length of 100. 
+ +[lint.extend-per-file-ignores] +"__init__.py" = ["E402", "F401", "F403"] +"**/tests/test_*.py" = ["D205"] # Don't worry about test docstrings From 91afb08c336ffb1e6c3f3446cd84d04a0acbb227 Mon Sep 17 00:00:00 2001 From: Tom Aldcroft Date: Mon, 13 Nov 2023 09:08:00 -0500 Subject: [PATCH 2/4] ruff format --- sparkles/tests/test_checks.py | 50 +++++++++++++++++------------------ 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/sparkles/tests/test_checks.py b/sparkles/tests/test_checks.py index 0e4134a..2a0280c 100644 --- a/sparkles/tests/test_checks.py +++ b/sparkles/tests/test_checks.py @@ -52,7 +52,7 @@ def test_check_P2(): **mod_std_info(n_fid=0, n_guide=8, obsid=50000), stars=stars, dark=DARK40, - raise_exc=True + raise_exc=True, ) acar = ACAReviewTable(aca) acar.check_acq_p2() @@ -71,7 +71,7 @@ def test_n_guide_check_not_enough_stars(): **mod_std_info(n_fid=3, n_guide=5, obsid=5000), stars=stars, dark=DARK40, - raise_exc=True + raise_exc=True, ) acar = ACAReviewTable(aca) acar.check_guide_count() @@ -93,7 +93,7 @@ def test_guide_is_candidate(): stars=stars, dark=DARK40, include_ids_guide=[100], - raise_exc=True + raise_exc=True, ) acar = ACAReviewTable(aca) acar.check_catalog() @@ -116,7 +116,7 @@ def test_n_guide_check_atypical_request(): **mod_std_info(n_fid=3, n_guide=4, obsid=5000), stars=stars, dark=DARK40, - raise_exc=True + raise_exc=True, ) acar = ACAReviewTable(aca) acar.check_guide_count() @@ -137,7 +137,7 @@ def test_n_guide_mon_check_atypical_request(): **mod_std_info(n_fid=2, n_guide=6, obsid=5000, monitors=monitors), stars=stars, dark=DARK40, - raise_exc=True + raise_exc=True, ) acar = ACAReviewTable(aca) acar.check_guide_count() @@ -193,7 +193,7 @@ def test_n_guide_too_few_guide_or_mon(): **mod_std_info(n_fid=2, n_guide=6, obsid=5000, monitors=monitors), stars=stars, dark=DARK40, - raise_exc=True + raise_exc=True, ) acar = ACAReviewTable(aca) acar.check_guide_count() @@ -222,7 +222,7 @@ def test_guide_count_er1(): **mod_std_info(n_fid=0, n_guide=8, obsid=50000), stars=stars, dark=DARK40, - raise_exc=True + raise_exc=True, ) acar = ACAReviewTable(aca) acar.check_guide_count() @@ -240,7 +240,7 @@ def test_guide_count_er2(): **mod_std_info(n_fid=0, n_guide=8, obsid=50000), stars=stars, dark=DARK40, - raise_exc=True + raise_exc=True, ) acar = ACAReviewTable(aca) acar.check_guide_count() @@ -259,7 +259,7 @@ def test_guide_count_er3(): **mod_std_info(obsid=50000, n_fid=0, n_guide=8), stars=stars, dark=DARK40, - raise_exc=True + raise_exc=True, ) acar = ACAReviewTable(aca) acar.check_guide_count() @@ -280,7 +280,7 @@ def test_guide_count_er4(): **mod_std_info(obsid=50000, n_fid=0, n_guide=8), stars=stars, dark=DARK40, - raise_exc=True + raise_exc=True, ) acar = ACAReviewTable(aca) acar.check_guide_count() @@ -302,7 +302,7 @@ def test_include_exclude(): exclude_ids_guide=[100, 101], include_ids_acq=[106, 107], include_halfws_acq=[140, 120], - raise_exc=True + raise_exc=True, ) acar = ACAReviewTable(aca) acar.check_include_exclude() @@ -328,7 +328,7 @@ def test_guide_count_er5(): **mod_std_info(obsid=50000, n_fid=0, n_guide=8), stars=stars, dark=DARK40, - raise_exc=True + raise_exc=True, ) acar = ACAReviewTable(aca) acar.check_guide_count() @@ -346,7 +346,7 @@ def test_guide_count_or(): **mod_std_info(n_fid=3, n_guide=5, obsid=1), stars=stars, dark=DARK40, - raise_exc=True + raise_exc=True, ) acar = ACAReviewTable(aca) acar.check_guide_count() @@ -366,7 +366,7 @@ def test_ok_number_bright_guide_stars(): **mod_std_info(n_fid=3, n_guide=5, obsid=1), stars=stars, 
dark=DARK40, - raise_exc=True + raise_exc=True, ) acar = ACAReviewTable(aca) acar.check_guide_count() @@ -386,7 +386,7 @@ def test_too_many_bright_stars(): **mod_std_info(n_fid=3, n_guide=5, obsid=1), stars=stars, dark=DARK40, - raise_exc=True + raise_exc=True, ) acar = ACAReviewTable(aca) acar.check_guide_count() @@ -407,7 +407,7 @@ def test_low_guide_count(): **mod_std_info(n_fid=3, n_guide=5, obsid=1), stars=stars, dark=DARK40, - raise_exc=True + raise_exc=True, ) acar = ACAReviewTable(aca) # Confirm the guide_count is in the range we want for the test to be valid @@ -432,7 +432,7 @@ def test_low_guide_count_creep_away(): **mod_std_info(n_fid=3, n_guide=5, obsid=1, man_angle_next=5.0), stars=stars, dark=DARK40, - raise_exc=True + raise_exc=True, ) acar = ACAReviewTable(aca) # Confirm the guide_count is in the range we want for the test to be valid @@ -456,7 +456,7 @@ def test_reduced_dither_low_guide_count(): **mod_std_info(n_fid=3, n_guide=5, obsid=1, dyn_bgd_n_faint=0, dither=(4, 4)), stars=stars, dark=DARK40, - raise_exc=True + raise_exc=True, ) acar = ACAReviewTable(aca) @@ -518,7 +518,7 @@ def test_not_reduced_dither_low_guide_count(): **mod_std_info(n_fid=3, n_guide=5, obsid=1, dyn_bgd_n_faint=0, dither=(8, 8)), stars=stars, dark=DARK40, - raise_exc=True + raise_exc=True, ) acar = ACAReviewTable(aca) @@ -546,7 +546,7 @@ def test_not_reduced_dither_low_guide_count_dyn_bgd(): **mod_std_info(n_fid=3, n_guide=5, obsid=1, dyn_bgd_n_faint=1, dither=(8, 8)), stars=stars, dark=DARK40, - raise_exc=True + raise_exc=True, ) acar = ACAReviewTable(aca) # Confirm the guide_count is in the range we want for the test to be valid @@ -574,7 +574,7 @@ def test_pos_err_on_guide(): stars=stars, dark=DARK40, raise_exc=True, - include_ids_guide=[100, 101] + include_ids_guide=[100, 101], ) # Must force 100, 101, pos_err too big acar = ACAReviewTable(aca) @@ -607,7 +607,7 @@ def test_guide_overlap(): stars=stars, dark=DARK40, raise_exc=True, - include_ids_guide=[1, 2] + include_ids_guide=[1, 2], ) assert 2 in aca.guides["id"] assert 1 in aca.guides["id"] @@ -644,7 +644,7 @@ def test_guide_edge_check(): stars=stars, dark=DARK40, raise_exc=True, - include_ids_guide=np.arange(1, 7) + include_ids_guide=np.arange(1, 7), ) acar = ACAReviewTable(aca) acar.check_catalog() @@ -694,7 +694,7 @@ def test_imposters_on_guide(exp_warn): **mod_std_info(n_fid=0, n_guide=8), stars=stars, dark=dark_with_badpix, - raise_exc=True + raise_exc=True, ) acar = ACAReviewTable(aca) acar.check_imposters_guide(aca.guides.get_id(110)) @@ -716,7 +716,7 @@ def test_bad_star_set(proseco_agasc_1p7): aca = get_aca_catalog( **mod_std_info(n_fid=0, att=(ra, dec, 0)), dark=DARK40, - include_ids_guide=[bad_id] + include_ids_guide=[bad_id], ) acar = ACAReviewTable(aca) acar.check_catalog() From fd3d5462e923089c3d96bc3d8355bd3c01a1dc4a Mon Sep 17 00:00:00 2001 From: Tom Aldcroft Date: Mon, 13 Nov 2023 09:34:53 -0500 Subject: [PATCH 3/4] Fix ruff check warnings --- ruff.toml | 6 +++++- sparkles/core.py | 28 +++++++++++++++------------- sparkles/find_er_catalog.py | 7 +------ sparkles/roll_optimize.py | 20 ++++++++++++-------- sparkles/tests/test_checks.py | 1 - sparkles/tests/test_review.py | 2 +- sparkles/yoshi.py | 8 ++++---- 7 files changed, 38 insertions(+), 34 deletions(-) diff --git a/ruff.toml b/ruff.toml index 62bd58b..8283808 100644 --- a/ruff.toml +++ b/ruff.toml @@ -68,4 +68,8 @@ max-line-length = 100 # E501 reports lines that exceed the length of 100. 
[lint.extend-per-file-ignores] "__init__.py" = ["E402", "F401", "F403"] -"**/tests/test_*.py" = ["D205"] # Don't worry about test docstrings +# For tests: +# - D205: Don't worry about test docstrings +# - ARG001: Unused function argument false positives for some fixtures +"**/tests/test_*.py" = ["D205", "ARG001"] + diff --git a/sparkles/core.py b/sparkles/core.py index 584940e..6f2600d 100644 --- a/sparkles/core.py +++ b/sparkles/core.py @@ -1,4 +1,3 @@ -# coding: utf-8 # Licensed under a 3-clause BSD style license - see LICENSE.rst """ @@ -1175,8 +1174,8 @@ def check_catalog(self): self.check_include_exclude() def check_guide_overlap(self): - """ - Check for overlapping tracked items. + """Check for overlapping tracked items. + Overlap is defined as within 12 pixels. """ ok = np.in1d(self["type"], ("GUI", "BOT", "FID", "MON")) @@ -1266,8 +1265,9 @@ def check_guide_fid_position_on_ccd(self, entry): col_lim = ACA.max_ccd_col - ACA.CCD["window_pad"] def sign(axis): - """Return sign of the corresponding entry value. Note that np.sign returns 0 - if the value is 0.0, not the right thing here. + """Return sign of the corresponding entry value. + + Note that np.sign returns 0 if the value is 0.0, not the right thing here. """ return -1 if (entry[axis] < 0) else 1 @@ -1445,9 +1445,10 @@ def check_guide_count(self): # Add a check that for ORs with guide count between 3.5 and 4.0, the # dither is 4 arcsec if dynamic background not enabled. def check_dither(self): - """ - Check dither. This presently checks that dither is 4x4 arcsec if - dynamic background is not in use and the field has a low guide_count. + """Check dither. + + This presently checks that dither is 4x4 arcsec if dynamic background is not in + use and the field has a low guide_count. """ # Skip check if guide_count is 4.0 or greater @@ -1488,11 +1489,12 @@ def check_imposters_guide(self, star): # Borrow the imposter offset method from starcheck def imposter_offset(cand_mag, imposter_mag): - """ - For a given candidate star and the pseudomagnitude of the brightest 2x2 imposter - calculate the max offset of the imposter counts are at the edge of the 6x6 - (as if they were in one pixel). This is somewhat the inverse of - proseco.get_pixmag_for_offset . + """Get imposter offset. + + For a given candidate star and the pseudomagnitude of the brightest 2x2 + imposter calculate the max offset of the imposter counts are at the edge of + the 6x6 (as if they were in one pixel). This is somewhat the inverse of + proseco.get_pixmag_for_offset. 
""" cand_counts = mag_to_count_rate(cand_mag) spoil_counts = mag_to_count_rate(imposter_mag) diff --git a/sparkles/find_er_catalog.py b/sparkles/find_er_catalog.py index 999b322..6e0d0c5 100644 --- a/sparkles/find_er_catalog.py +++ b/sparkles/find_er_catalog.py @@ -235,12 +235,7 @@ def convert_atts_to_list_of_quats(atts): if isinstance(atts, Quat): out = [Quat(q) for q in atts.q.reshape(-1, 4)] else: - out = [] - # Assume atts is a flat list of Quats or Quat-compatible objects - for att in atts: - if not isinstance(att, Quat): - att = Quat(att) - out.append(att) + out = [(att if isinstance(att, Quat) else Quat(att)) for att in atts] return out diff --git a/sparkles/roll_optimize.py b/sparkles/roll_optimize.py index 73b1df3..5e6b836 100644 --- a/sparkles/roll_optimize.py +++ b/sparkles/roll_optimize.py @@ -1,4 +1,3 @@ -# coding: utf-8 # Licensed under a 3-clause BSD style license - see LICENSE.rst """ @@ -23,8 +22,8 @@ def logical_intervals(vals, x=None): - """ - Determine contiguous intervals during which ``vals`` is True. + """Determine contiguous intervals during which ``vals`` is True. + Returns an Astropy Table with a row for each interval. Columns are: * idx_start: index of interval start * idx_stop: index of interval stop @@ -59,6 +58,7 @@ def logical_intervals(vals, x=None): class RollOptimizeMixin: def get_candidate_better_stars(self): """Find stars that *might* substantially improve guide or acq catalogs. + Get stars that might be candidates at a different roll. This takes stars outside the original square CCD FOV (but made smaller by 40 pixels) and inside a circle corresponding to the box corners (but made @@ -66,8 +66,8 @@ def get_candidate_better_stars(self): originally excluded because of dither size etc are considered. :returns: list of indexes into self.stars """ - # First define a spatial mask ``sp_ok`` on ``stars`` that is the - # region (mentioned above) between an inner square and outer circle. + # First define a spatial mask ``sp_ok`` on ``stars`` that is the region + # (mentioned above) between an inner square and outer circle. rc_pad = 40 stars = self.stars in_fov = (np.abs(stars["row"]) < CCD["row_max"] - rc_pad) & ( @@ -123,6 +123,7 @@ def get_roll_intervals( max_roll_dev=None, ): """Find a list of rolls that might substantially improve guide or acq catalogs. + If ``roll_nom`` is not specified then an approximate value is computed via ska_sun for the catalog ``date``. if ``roll_dev`` (max allowed off-nominal roll) is not specified it is computed using the OFLS table. @@ -276,9 +277,10 @@ def _get_roll_intervals_uniform( def _get_roll_intervals_uniq_ids( ids0, ids_list, roll, roll_min, roll_max, roll_offsets, d_roll ): - """Private method to get roll intervals that span a range where there is a unique - set of available candidate stars within the entire interval. + """Get roll intervals. + Private method to get roll intervals that span a range where there is a unique + set of available candidate stars within the entire interval. """ # Get all unique sets of stars that are in the FOV over the sampled # roll offsets. Ignore ids sets that do not add new candidate stars. @@ -426,7 +428,9 @@ def get_roll_options( self.roll_options = roll_options def sort_and_limit_roll_options(self, roll_level, max_roll_options): - """Sort the roll options based on two keys: + """Sort the roll options based on two keys. + + Keys are: - Number of warnings at roll_level or worse (e.g. 
number of criticals, so smaller is better) - Negative of improvement (larger improvement is better) diff --git a/sparkles/tests/test_checks.py b/sparkles/tests/test_checks.py index 2a0280c..a2bc50c 100644 --- a/sparkles/tests/test_checks.py +++ b/sparkles/tests/test_checks.py @@ -1,4 +1,3 @@ -# coding: utf-8 # Licensed under a 3-clause BSD style license - see LICENSE.rst import copy import pickle diff --git a/sparkles/tests/test_review.py b/sparkles/tests/test_review.py index 95f137a..097c3bb 100644 --- a/sparkles/tests/test_review.py +++ b/sparkles/tests/test_review.py @@ -546,7 +546,7 @@ def compare_intervs(intervs, exp_intervs): assert len(intervs) == len(exp_intervs) for interv, exp_interv in zip(intervs, exp_intervs): assert interv.keys() == exp_interv.keys() - for key in interv.keys(): + for key in interv: if key.startswith("roll"): assert np.isclose(interv[key], exp_interv[key], atol=1e-6, rtol=0) else: diff --git a/sparkles/yoshi.py b/sparkles/yoshi.py index 90a5615..64b34cd 100644 --- a/sparkles/yoshi.py +++ b/sparkles/yoshi.py @@ -8,7 +8,8 @@ def get_yoshi_params_from_ocat(obsid, obs_date=None, web_ocat=True, cycle=None): - """ + """Get yoshi parameters from the OCAT for use in proseco / sparkles. + For an obsid in the OCAT, fetch params from OCAT and define a few defaults for the standard info needed to get an ACA attitude and run yoshi / proseco / sparkles. @@ -108,9 +109,8 @@ def run_one_yoshi( man_angle, **kwargs, ): - """ - Run proseco and sparkles for an observation request in a roll/temperature/man_angle - scenario. + """Run proseco and sparkles for an OR in a roll/temperature/man_angle scenario. + :param obsid: obsid :param detector: detector (ACIS-I|ACIS-S|HRC-I|HRC-S) :param chipx: chipx from zero-offset aimpoint table entry for obsid From b35efab9242e0584d14c3bbb55140c0895c722fe Mon Sep 17 00:00:00 2001 From: Tom Aldcroft Date: Mon, 13 Nov 2023 09:39:14 -0500 Subject: [PATCH 4/4] Remove ruff extend-ignore options --- ruff.toml | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/ruff.toml b/ruff.toml index 8283808..238fc52 100644 --- a/ruff.toml +++ b/ruff.toml @@ -47,18 +47,6 @@ ignore = [ "PLR2004", # Magic number ] -# TODO : fix these and stop ignoring. Commented out ones are common and OK to except. -extend-ignore = [ - "PGH004", # Use specific rule codes when using `noqa` -# "C401", # Unnecessary generator (rewrite as a `set` comprehension) -# "C402", # Unnecessary generator (rewrite as a dict comprehension) -# "C405", # Unnecessary `list` literal (rewrite as a `set` literal) -# "C408", # Unnecessary `dict` call (rewrite as a literal) -# "C416", # Unnecessary `dict` comprehension (rewrite using `dict()`) -# "PGH002", # warn is deprecated in favor of warning -# "PYI056", # Calling `.append()` on `__all__` may not be supported by all type checkers -] - extend-exclude = [ "docs", ]
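
Note: the checks configured above can also be reproduced locally. This is a minimal sketch, assuming ruff 0.1.5 (matching the pinned pre-commit rev) and pre-commit are installed; the exact invocation used by chartboost/ruff-action may differ slightly:

    ruff check .                  # lint, same rule set as .github/workflows/python-linting.yml
    ruff format --check .         # formatting check, same as python-formatting.yml
    pre-commit run --all-files    # runs both the ruff and ruff-format hooks

Both ruff commands pick up ruff.toml from the repository root automatically, so no extra flags should be needed to match CI.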