feat(ruff): enable Refurb and Ruff rules (#22385)
hongbo-miao authored Jan 7, 2025
1 parent ee1d532 commit b001dc0
Showing 5 changed files with 12 additions and 10 deletions.
2 changes: 2 additions & 0 deletions .ruff.toml
@@ -175,10 +175,12 @@ select = [
 "E", # pycodestyle
 "F", # Pyflakes
 "FAST", # FastAPI
+"FURB", # Refurb
 "I", # isort
 "PERF", # Perflint
 "PGH", # pygrep-hooks
 "PL", # Pylint
+"RUF", # Ruff
 "TRY", # tryceratops
 "UP", # pyupgrade
 ]
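
For context, FURB selects Refurb-derived refactoring rules and RUF selects Ruff's own rules. Below is a minimal sketch of the kind of cleanup a Refurb-style check asks for; the rule code FURB113 (repeated-append) and the snippet are illustrative assumptions, not taken from this commit:

# Before: consecutive .append() calls on the same list (flagged by FURB113).
tags = []
tags.append("alpha")
tags.append("beta")

# After: a single .extend() call carries the same intent.
tags = []
tags.extend(("alpha", "beta"))
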
@@ -1,6 +1,7 @@
 # https://github.com/permitio/opal-fetcher-postgres

 import json
+from typing import Any, ClassVar

 import asyncpg
 from asyncpg.exceptions import DataError
@@ -38,12 +39,11 @@ class PostgresFetchEvent(FetchEvent):


 class PostgresFetchProvider(BaseFetchProvider):
-    RETRY_CONFIG = {
+    RETRY_CONFIG: ClassVar[dict[str, Any]] = {
         "wait": wait.wait_random_exponential(),
         "stop": stop.stop_after_attempt(10),
-        "retry": retry_unless_exception_type(
-            DataError
-        ),  # query error (i.e: invalid table, etc)
+        # query error (i.e: invalid table, etc)
+        "retry": retry_unless_exception_type(DataError),
         "reraise": True,
     }
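
The RETRY_CONFIG change above is the fix Ruff's RUF012 rule asks for: mutable class attributes should be annotated with typing.ClassVar so they are not mistaken for per-instance defaults. A minimal sketch of the same pattern in isolation, using a hypothetical Settings class:

from typing import Any, ClassVar


class Settings:
    # A class-level mutable dict; ClassVar makes the intent explicit and
    # satisfies RUF012.
    DEFAULTS: ClassVar[dict[str, Any]] = {"timeout_s": 30, "retries": 3}
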

2 changes: 1 addition & 1 deletion data-storage/lance/src/main.py
@@ -69,7 +69,7 @@ def main() -> None:
 # nprobes:
 # The number of probes determines the distribution of vector space.
 # While a higher number enhances search accuracy, it also results in slower performance.
-# Typically, setting nprobes to cover 5–10% of the dataset proves effective in achieving high recall with minimal latency.
+# Typically, setting nprobes to cover 5 - 10% of the dataset proves effective in achieving high recall with minimal latency.
 #
 # refine_factor:
 # Refine the results by reading extra elements and re-ranking them in memory.
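
The comment block above describes nprobes and refine_factor; for readers unfamiliar with the Lance Python API, here is a minimal sketch of where those parameters are passed during a nearest-neighbor query. The dataset path, vector column name, dimensionality, and concrete values are assumptions for illustration, not taken from this file:

import lance
import numpy as np

dataset = lance.dataset("./vectors.lance")  # assumed dataset path
query = np.random.rand(128).astype(np.float32)  # assumed 128-dim query vector
results = dataset.to_table(
    nearest={
        "column": "vector",   # assumed vector column name
        "q": query,
        "k": 10,
        "nprobes": 20,        # more probes: higher recall, slower search
        "refine_factor": 10,  # re-rank 10x more candidates in memory
    },
)
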
2 changes: 1 addition & 1 deletion machine-learning/feature-store/driver_features/example.py
@@ -9,7 +9,7 @@
 # production, you can use your favorite DWH, such as BigQuery. See Feast documentation
 # for more info.
 driver_hourly_stats = FileSource(
-    path="/Users/hongbo-miao/Clouds/Git/hongbomiao.com/machine-learning/feature-store/driver_features/data/driver_stats.parquet",  # noqa: E501
+    path="/Users/hongbo-miao/Clouds/Git/hongbomiao.com/machine-learning/feature-store/driver_features/data/driver_stats.parquet",
     timestamp_field="event_timestamp",
     created_timestamp_column="created",
 )
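
For context, a FileSource like the one above is normally consumed by a FeatureView in the same module. A minimal sketch of that wiring, assuming the parquet file carries the usual Feast demo columns; the entity, join key, and field names are illustrative assumptions, not taken from this file:

from datetime import timedelta

from feast import Entity, FeatureView, Field
from feast.types import Float32, Int64

driver = Entity(name="driver", join_keys=["driver_id"])  # assumed join key

driver_hourly_stats_view = FeatureView(
    name="driver_hourly_stats",
    entities=[driver],
    ttl=timedelta(days=1),
    schema=[
        Field(name="conv_rate", dtype=Float32),      # assumed column
        Field(name="avg_daily_trips", dtype=Int64),  # assumed column
    ],
    source=driver_hourly_stats,  # the FileSource defined above
)
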
@@ -100,8 +100,8 @@ def retrieve_context(
         relevant_chunks = [chunks[idx] for idx in indices[0]]
         state["context"] = "\n".join(relevant_chunks)
         logger.info("Context retrieval completed")
-    except Exception as e:
-        logger.error(f"Error in retrieve_context: {str(e)}", exc_info=True)
+    except Exception:
+        logger.exception("Error in retrieve_context.")
         raise
     else:
         return state
@@ -128,8 +128,8 @@ def generate_answer(state: MessagesState) -> MessagesState:
             ],
         )
         state["answer"] = response.choices[0].message.content
-    except Exception as e:
-        logger.error(f"Error in generate_answer: {str(e)}", exc_info=True)
+    except Exception:
+        logger.exception("Error in generate_answer.")
         raise
     else:
         return state
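
Both hunks in this file replace logger.error(f"... {str(e)}", exc_info=True) with logger.exception(...), the pattern Ruff's TRY400 rule (use logging.exception inside an except block) enforces: logger.exception logs at ERROR level and records the active traceback automatically, so the handler no longer needs to bind or format the exception object. Which newly enabled check actually triggered this particular edit is not visible from the diff. A minimal sketch of the pattern on its own; the function name and message are illustrative:

import logging

logger = logging.getLogger(__name__)


def load_config(path: str) -> str:
    try:
        with open(path, encoding="utf-8") as f:
            return f.read()
    except Exception:
        # logger.exception already appends the traceback, so neither
        # `as e` nor exc_info=True is needed here.
        logger.exception("Error in load_config.")
        raise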
