
Commit

feat(ruff): enable flake8-commas (#22386)
hongbo-miao authored Jan 7, 2025
1 parent 96a8bfe commit 2c0e106
Showing 97 changed files with 460 additions and 223 deletions.
1 change: 1 addition & 0 deletions .ruff.toml
@@ -172,6 +172,7 @@ exclude = [
 [lint]
 select = [
 "AIR", # Airflow
+"COM", # flake8-commas
 "E", # pycodestyle
 "F", # Pyflakes
 "FAST", # FastAPI
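For context, here is a minimal sketch (not taken from any file in this commit) of what the newly selected COM rules ask for. flake8-commas, surfaced in ruff as COM812 (missing-trailing-comma), flags a call or literal whose last element has no trailing comma when the closing bracket sits on its own line; the fix is the trailing-comma style repeated throughout the diffs below.

import logging

# Before: flagged by COM812 because the call spans multiple lines and the last
# argument has no trailing comma before the closing parenthesis.
# logging.basicConfig(
#     level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
# )

# After: the trailing-comma style this commit applies across the repository.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(message)s",
)

Since COM812 is marked as auto-fixable in ruff, running something like ruff check --fix should apply most of these trailing-comma changes automatically; that is presumably how the bulk of the 97 files below were updated.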
5 changes: 3 additions & 2 deletions aerospace/hm-aerosandbox/src/main.py
@@ -100,7 +100,7 @@ def main() -> None:
 )
 for xi in np.cosspace(0, 1, 30)
 ],
-)
+),
 ],
 )
 vlm = asb.VortexLatticeMethod(
@@ -186,6 +186,7 @@ def main() -> None:
 
 if __name__ == "__main__":
 logging.basicConfig(
-level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
+level=logging.INFO,
+format="%(asctime)s - %(levelname)s - %(message)s",
 )
 main()
9 changes: 6 additions & 3 deletions aerospace/hm-openaerostruct/src/main.py
@@ -80,11 +80,13 @@ def main() -> None:
 
 # Perform the connections with the modified names within the 'aero_states' group
 problem.model.connect(
-name + ".mesh", point_name + ".aero_states." + name + "_def_mesh"
+name + ".mesh",
+point_name + ".aero_states." + name + "_def_mesh",
 )
 
 problem.model.connect(
-name + ".t_over_c", point_name + "." + name + "_perf." + "t_over_c"
+name + ".t_over_c",
+point_name + "." + name + "_perf." + "t_over_c",
 )
 
 # Import the Scipy Optimizer and set the driver of the problem to use it, which defaults to an SLSQP optimization method
@@ -105,6 +107,7 @@
 
 if __name__ == "__main__":
 logging.basicConfig(
-level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
+level=logging.INFO,
+format="%(asctime)s - %(levelname)s - %(message)s",
 )
 main()
3 changes: 2 additions & 1 deletion api-rust/scripts/download_model.py
@@ -40,7 +40,8 @@ def download_resnet18():
 
 if __name__ == "__main__":
 logging.basicConfig(
-level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
+level=logging.INFO,
+format="%(asctime)s - %(levelname)s - %(message)s",
 )
 download_resnet18()
 download_labels()
14 changes: 10 additions & 4 deletions authorization/hm-opal-client/opal_fetcher_postgres/provider.py
@@ -24,12 +24,14 @@ class PostgresConnectionParams(BaseModel):
 class PostgresFetcherConfig(FetcherConfig):
 fetcher: str = "PostgresFetchProvider"
 connection_params: PostgresConnectionParams | None = Field(
-None, description="can be overridden or complement parts of the DSN"
+None,
+description="can be overridden or complement parts of the DSN",
 )
 query: str = Field(..., description="the query")
 fetch_one: bool = Field(False, description="fetch only one row")
 dict_key: str | None = Field(
-None, description="array of dict will map to dict with provided dict_key"
+None,
+description="array of dict will map to dict with provided dict_key",
 )
 
 
@@ -68,11 +70,15 @@ async def __aenter__(self):
 )
 
 self._connection: asyncpg.Connection = await asyncpg.connect(
-dsn, **connection_params
+dsn,
+**connection_params,
 )
 
 await self._connection.set_type_codec(
-"jsonb", encoder=json.dumps, decoder=json.loads, schema="pg_catalog"
+"jsonb",
+encoder=json.dumps,
+decoder=json.loads,
+schema="pg_catalog",
 )
 
 self._transaction: Transaction = self._connection.transaction(readonly=True)
3 changes: 2 additions & 1 deletion cloud-computing/hm-ray/applications/calculate/src/main.py
@@ -19,7 +19,8 @@ def sum_list(numbers: list[int]) -> int:
 
 if __name__ == "__main__":
 logging.basicConfig(
-level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
+level=logging.INFO,
+format="%(asctime)s - %(levelname)s - %(message)s",
 )
 ray.init()
 logger.info(ray.cluster_resources())
Another changed file (file path not shown in this view)
@@ -20,7 +20,7 @@ def process_flight_data(
 experiment_number: int,
 ) -> pd.DataFrame:
 mlflow.set_tracking_uri(
-f"https://{mlflow_tracking_server_user_name}:{mlflow_tracking_server_password}@{mlflow_tracking_server_host}"
+f"https://{mlflow_tracking_server_user_name}:{mlflow_tracking_server_password}@{mlflow_tracking_server_host}",
 )
 mlflow.set_experiment(mlflow_experiment_name)
 
@@ -39,7 +39,8 @@ def process_flight_data(
 
 if __name__ == "__main__":
 logging.basicConfig(
-level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
+level=logging.INFO,
+format="%(asctime)s - %(levelname)s - %(message)s",
 )
 
 ray.init()
Another changed file (file path not shown in this view)
@@ -10,12 +10,13 @@
 from pyspark.sql.functions import col, concat, date_format, from_unixtime, lit, when
 
 logging.basicConfig(
-level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
+level=logging.INFO,
+format="%(asctime)s - %(levelname)s - %(message)s",
 )
 logger = logging.getLogger(__name__)
 
 raw_parquet_paths = [
-"s3://hm-production-bucket/data/raw-parquet/adsb_2x_flight_trace_data/"
+"s3://hm-production-bucket/data/raw-parquet/adsb_2x_flight_trace_data/",
 ]
 delta_table_path = (
 "s3://hm-production-bucket/data/delta-tables/adsb_2x_flight_trace_data/"
@@ -46,18 +47,23 @@ def add_time_column(
 
 
 def add_date_column(
-df: DataFrame, time_column_name: str, date_column_name: str
+df: DataFrame,
+time_column_name: str,
+date_column_name: str,
 ) -> DataFrame:
 return df.withColumn(
 date_column_name,
 date_format(
-from_unixtime(col(time_column_name) / lit(1000000000.0)), "yyyy-MM-dd"
+from_unixtime(col(time_column_name) / lit(1000000000.0)),
+"yyyy-MM-dd",
 ),
 )
 
 
 def add_dbflags_columns(
-df: DataFrame, flag_column_name: str, columns_and_masks: list[tuple[str, int]]
+df: DataFrame,
+flag_column_name: str,
+columns_and_masks: list[tuple[str, int]],
 ) -> DataFrame:
 for column_name, mask in columns_and_masks:
 df = df.withColumn(
@@ -68,7 +74,9 @@ def add_dbflags_columns(
 
 
 def add_trace_flags_columns(
-df: DataFrame, flag_column_name: str, columns_and_masks: list[tuple[str, int]]
+df: DataFrame,
+flag_column_name: str,
+columns_and_masks: list[tuple[str, int]],
 ) -> DataFrame:
 for column_name, mask in columns_and_masks:
 if column_name in [
@@ -78,14 +86,15 @@ def add_trace_flags_columns(
 df = df.withColumn(
 column_name,
 when(
-(col(flag_column_name).bitwiseAND(mask)) > 0, "geometric"
+(col(flag_column_name).bitwiseAND(mask)) > 0,
+"geometric",
 ).otherwise("barometric"),
 )
 else:
 df = df.withColumn(
 column_name,
 when((col(flag_column_name).bitwiseAND(mask)) > 0, True).otherwise(
-False
+False,
 ),
 )
 return df
@@ -101,7 +110,8 @@ def add_trace_on_ground_column(
 df = df.withColumn(
 trace_on_ground_column_name,
 when(
-col(trace_altitude_ft_column_name) == lit(ground_value), True
+col(trace_altitude_ft_column_name) == lit(ground_value),
+True,
 ).otherwise(False),
 )
 return df
@@ -235,7 +245,10 @@ def add_coordinate_column(
 # Add "_coordinate"
 coordinate_column_name = "_coordinate"
 df = add_coordinate_column(
-df, "trace_longitude_deg", "trace_latitude_deg", coordinate_column_name
+df,
+"trace_longitude_deg",
+"trace_latitude_deg",
+coordinate_column_name,
 )
 
 # Add "_time"
@@ -251,7 +264,7 @@ def add_coordinate_column(
 "mergeSchema": "true",
 }
 df.write.format("delta").options(**additional_options).partitionBy(*partitions).mode(
-"append"
+"append",
 ).save()
 
 job.commit()
Another changed file (file path not shown in this view)
@@ -8,7 +8,8 @@
 from pyspark.context import SparkContext
 
 logging.basicConfig(
-level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
+level=logging.INFO,
+format="%(asctime)s - %(levelname)s - %(message)s",
 )
 logger = logging.getLogger(__name__)
 
@@ -46,7 +47,7 @@
 "mergeSchema": "true",
 }
 df.write.format("delta").options(**additional_options).partitionBy(*partitions).mode(
-"overwrite"
+"overwrite",
 ).save()
 
 job.commit()
Another changed file (file path not shown in this view)
@@ -38,7 +38,7 @@
 " [\n",
 " torchvision.transforms.transforms.ToTensor(),\n",
 " torchvision.transforms.transforms.Normalize((0.1307,), (0.3081,)),\n",
-" ]\n",
+" ],\n",
 " ),\n",
 ")"
 ]
@@ -51,7 +51,9 @@
 "source": [
 "# Upload the data to S3\n",
 "data_s3_uri = sagemaker_session.upload_data(\n",
-" path=\"data/\", bucket=s3_bucket, key_prefix=f\"{experiment_name}-data\"\n",
+" path=\"data/\",\n",
+" bucket=s3_bucket,\n",
+" key_prefix=f\"{experiment_name}-data\",\n",
 ")\n",
 "print(data_s3_uri)"
 ]
Another changed file (file path not shown in this view)
@@ -15,7 +15,7 @@ def get_test_data_loader(test_batch_size, training_dir, **kwargs):
 training_dir,
 train=False,
 transform=transforms.Compose(
-[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
+[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))],
 ),
 ),
 batch_size=test_batch_size,
Another changed file (file path not shown in this view)
@@ -14,7 +14,7 @@ def get_train_data_loader(batch_size, training_dir, is_distributed, **kwargs):
 training_dir,
 train=True,
 transform=transforms.Compose(
-[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
+[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))],
 ),
 )
 train_sampler = (
Another changed file (file path not shown in this view)
@@ -24,5 +24,5 @@ def test(model, test_loader, device):
 
 test_loss /= len(test_loader.dataset)
 logger.info(
-f"Test set: Average loss: {test_loss:.4f}, Accuracy: {correct}/{len(test_loader.dataset)} ({100.0 * correct / len(test_loader.dataset):.0f}%)\n"
+f"Test set: Average loss: {test_loss:.4f}, Accuracy: {correct}/{len(test_loader.dataset)} ({100.0 * correct / len(test_loader.dataset):.0f}%)\n",
 )
Another changed file (file path not shown in this view)
@@ -33,11 +33,13 @@ def train(args):
 host_rank = args.hosts.index(args.current_host)
 os.environ["RANK"] = str(host_rank)
 dist.init_process_group(
-backend=args.backend, rank=host_rank, world_size=world_size
+backend=args.backend,
+rank=host_rank,
+world_size=world_size,
 )
 logger.info(
 f"Initialized the distributed environment: '{args.backend}' backend on {dist.get_world_size()} nodes. "
-+ f"Current host rank is {dist.get_rank()}. Number of gpus: {args.num_gpus}"
++ f"Current host rank is {dist.get_rank()}. Number of gpus: {args.num_gpus}",
 )
 
 # set the seed for generating random numbers
@@ -46,16 +48,19 @@
 torch.cuda.manual_seed(args.seed)
 
 train_loader = get_train_data_loader(
-args.batch_size, args.data_dir, is_distributed, **kwargs
+args.batch_size,
+args.data_dir,
+is_distributed,
+**kwargs,
 )
 test_loader = get_test_data_loader(args.test_batch_size, args.data_dir, **kwargs)
 
 logger.info(
-f"Processes {len(train_loader.sampler)}/{len(train_loader.dataset)} ({100.0 * len(train_loader.sampler) / len(train_loader.dataset):.0f}%) of train data"
+f"Processes {len(train_loader.sampler)}/{len(train_loader.dataset)} ({100.0 * len(train_loader.sampler) / len(train_loader.dataset):.0f}%) of train data",
 )
 
 logger.info(
-f"Processes {len(test_loader.sampler)}/{len(test_loader.dataset)} ({100.0 * len(test_loader.sampler) / len(test_loader.dataset):.0f}%) of test data"
+f"Processes {len(test_loader.sampler)}/{len(test_loader.dataset)} ({100.0 * len(test_loader.sampler) / len(test_loader.dataset):.0f}%) of test data",
 )
 
 model = Net().to(device)
@@ -82,7 +87,7 @@
 optimizer.step()
 if batch_idx % args.log_interval == 0:
 logger.info(
-f"Train Epoch: {epoch} [{batch_idx * len(device_data)}/{len(train_loader.sampler)} ({100.0 * batch_idx / len(train_loader):.0f}%)] Loss: {loss.item():.6f}"
+f"Train Epoch: {epoch} [{batch_idx * len(device_data)}/{len(train_loader.sampler)} ({100.0 * batch_idx / len(train_loader):.0f}%)] Loss: {loss.item():.6f}",
 )
 test(model, test_loader, device)
 save_model(model, args.model_dir)
The remaining changed files are not shown in this view.
