feat(ruff): enable mccabe and pep8-naming rules
hongbo-miao committed Jan 7, 2025
1 parent 7ce32ec commit 106980e
Showing 13 changed files with 63 additions and 61 deletions.
2 changes: 2 additions & 0 deletions .ruff.toml
@@ -172,13 +172,15 @@ exclude = [
[lint]
select = [
"AIR", # Airflow
+"C90", # mccabe
"COM", # flake8-commas
"E", # pycodestyle
"F", # Pyflakes
"FAST", # FastAPI
"FLY", # flynt
"FURB", # Refurb
"I", # isort
+"N", # pep8-naming
"NPY", # NumPy
"PERF", # Perflint
"PGH", # pygrep-hooks
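Note: C90 (mccabe) reports functions whose cyclomatic complexity exceeds a threshold (rule C901), while N (pep8-naming) enforces PEP 8 naming conventions; every rename in the files below is fallout from the N rules. The complexity threshold is tunable in .ruff.toml. A minimal sketch, assuming Ruff's default of 10 is kept (this table is not part of the diff):

[lint.mccabe]
# Functions whose cyclomatic complexity exceeds this value trigger C901.
max-complexity = 10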
4 changes: 2 additions & 2 deletions aerospace/hm-aerosandbox/src/main.py
@@ -124,8 +124,8 @@ def main() -> None:
align_trailing_vortices_with_wind=False,
)
aero = vlm.run()
-L_over_D = aero["CL"] / aero["CD"]
-opti.minimize(-L_over_D)
+l_over_d = aero["CL"] / aero["CD"]
+opti.minimize(-l_over_d)
sol = opti.solve()
best_alpha = sol(alpha)
logger.info(f"Alpha for max L/D: {best_alpha:.3f} deg")
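This rename fixes N806 (variable in function should be lowercase). Had the conventional aerodynamics notation been worth keeping, a per-line suppression would satisfy the linter instead. A hypothetical alternative, not what this commit does:

L_over_D = aero["CL"] / aero["CD"]  # noqa: N806 -- standard lift-to-drag notation
opti.minimize(-L_over_D)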
@@ -1,4 +1,4 @@
-import torch.nn.functional as F
+import torch.nn.functional as F # noqa: N812
from torch import nn


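N812 flags a lowercase module imported under a non-lowercase alias, which is exactly PyTorch's torch.nn.functional-as-F idiom; the suppression keeps the ecosystem-standard alias rather than renaming it. For contrast, a spelling that would pass without any suppression (hypothetical, not used in this repo):

import torch.nn.functional as functional  # lowercase alias, so N812 stays quiet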
@@ -1,7 +1,7 @@
import logging

import torch
-import torch.nn.functional as F
+import torch.nn.functional as F # noqa: N812
import torch.utils.data
import torch.utils.data.distributed

@@ -3,7 +3,7 @@

import torch
import torch.distributed as dist
-import torch.nn.functional as F
+import torch.nn.functional as F # noqa: N812
import torch.utils.data
import torch.utils.data.distributed
from models.net import Net
40 changes: 20 additions & 20 deletions data-analytics/hm-networkx/src/main.py
@@ -8,11 +8,11 @@

def create_network() -> nx.Graph:
# Initialize a new undirected graph
-G = nx.Graph()
+graph = nx.Graph()

# Add nodes
nodes = ["A", "B", "C", "D", "E"]
-G.add_nodes_from(nodes)
+graph.add_nodes_from(nodes)

# Add edges with weights
edges = [
@@ -23,55 +23,55 @@ def create_network() -> nx.Graph:
("A", "E", 6),
("B", "E", 3),
]
-G.add_weighted_edges_from(edges)
-return G
+graph.add_weighted_edges_from(edges)
+return graph


-def analyze_network(G: nx.Graph) -> None:
+def analyze_network(graph: nx.Graph) -> None:
# Calculate and print basic network metrics
logger.info("Network Analysis:")
-logger.info(f"Number of nodes: {G.number_of_nodes()}")
-logger.info(f"Number of edges: {G.number_of_edges()}")
+logger.info(f"Number of nodes: {graph.number_of_nodes()}")
+logger.info(f"Number of edges: {graph.number_of_edges()}")

# Calculate degree for each node
logger.info("Node degrees:")
-for node in G.nodes():
-logger.info(f"Node {node}: {G.degree(node)}")
+for node in graph.nodes():
+logger.info(f"Node {node}: {graph.degree(node)}")

# Calculate betweenness centrality
-betweenness = nx.betweenness_centrality(G)
+betweenness = nx.betweenness_centrality(graph)
logger.info("Betweenness centrality:")
for node, bc in betweenness.items():
logger.info(f"Node {node}: {bc:.3f}")

# Calculate shortest paths
logger.info("Shortest paths from node A:")
-for target in G.nodes():
+for target in graph.nodes():
if target != "A":
-path = nx.shortest_path(G, "A", target, weight="weight")
-distance = nx.shortest_path_length(G, "A", target, weight="weight")
+path = nx.shortest_path(graph, "A", target, weight="weight")
+distance = nx.shortest_path_length(graph, "A", target, weight="weight")
logger.info(f"A to {target}: {path} (distance: {distance})")


-def visualize_network(G: nx.Graph) -> None:
+def visualize_network(graph: nx.Graph) -> None:
"""
Create and display a visualization of the network.
"""
plt.figure(figsize=(10, 8))
-pos = nx.spring_layout(G)
+pos = nx.spring_layout(graph)

# Draw nodes
-nx.draw_networkx_nodes(G, pos, node_color="lightblue", node_size=500)
+nx.draw_networkx_nodes(graph, pos, node_color="lightblue", node_size=500)

# Draw edges
-nx.draw_networkx_edges(G, pos)
+nx.draw_networkx_edges(graph, pos)

# Draw labels
-nx.draw_networkx_labels(G, pos)
+nx.draw_networkx_labels(graph, pos)

# Draw edge labels
-edge_labels = nx.get_edge_attributes(G, "weight")
-nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels)
+edge_labels = nx.get_edge_attributes(graph, "weight")
+nx.draw_networkx_edge_labels(graph, pos, edge_labels=edge_labels)

plt.title("Network Graph")
plt.axis("off")
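All of the G-to-graph changes in this file are N806 fixes. For reference, a minimal driver wiring the three functions together, sketched from the signatures above (the file's actual entry point sits outside this hunk):

graph = create_network()
analyze_network(graph)
visualize_network(graph)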
34 changes: 17 additions & 17 deletions high-performance-computing/hm-jax/src/main.py
@@ -8,13 +8,13 @@


# The linear model
-def predict(w: float, b: float, X: jnp.ndarray) -> jnp.ndarray:
-return w * X + b
+def predict(w: float, b: float, x: jnp.ndarray) -> jnp.ndarray:
+return w * x + b


# The loss function (mean squared error)
-def loss_fn(w: float, b: float, X: jnp.ndarray, y: jnp.ndarray) -> float:
-predictions = predict(w, b, X)
+def loss_fn(w: float, b: float, x: jnp.ndarray, y: jnp.ndarray) -> float:
+predictions = predict(w, b, x)
return jnp.mean((predictions - y) ** 2)


@@ -23,11 +23,11 @@ def loss_fn(w: float, b: float, X: jnp.ndarray, y: jnp.ndarray) -> float:
def update(
w: float,
b: float,
-X: jnp.ndarray,
+x: jnp.ndarray,
y: jnp.ndarray,
learning_rate: float,
) -> tuple[float, float]:
-dw, db = grad(loss_fn, argnums=(0, 1))(w, b, X, y)
+dw, db = grad(loss_fn, argnums=(0, 1))(w, b, x, y)
w -= learning_rate * dw
b -= learning_rate * db
return w, b
@@ -36,18 +36,18 @@ def update(
def main() -> None:
# Generate synthetic data
key = random.PRNGKey(0)
-N = 100 # number of data points
-X = random.normal(key, (N,)) # features
+n = 100 # number of data points
+x = random.normal(key, (n,)) # features

# Adding even more non-linearity and varying noise
true_w, true_b = 2.0, -1.0 # true weights for the linear model
y = (
-true_w * X
+true_w * x
+ true_b
-+ jnp.sin(X) * 0.5
-+ jnp.cos(2 * X) * 0.3 # additional non-linear component
-+ random.normal(key, (N,)) * jnp.abs(X) * 0.5 # varying noise based on X
-+ random.normal(key, (N,)) * 2.0 # additional noise for more randomness
++ jnp.sin(x) * 0.5
++ jnp.cos(2 * x) * 0.3 # additional non-linear component
++ random.normal(key, (n,)) * jnp.abs(x) * 0.5 # varying noise based on X
++ random.normal(key, (n,)) * 2.0 # additional noise for more randomness
)

# Initialize weights
@@ -61,17 +61,17 @@ def main() -> None:
# Train
losses = []
for i in range(num_iterations):
-w, b = update(w, b, X, y, learning_rate)
-current_loss = loss_fn(w, b, X, y)
+w, b = update(w, b, x, y, learning_rate)
+current_loss = loss_fn(w, b, x, y)
losses.append(current_loss)
if i % 10 == 0:
logger.info(
f"Iteration {i}: loss = {current_loss:.4f}, w = {w:.4f}, b = {b:.4f}",
)

# Plot the results
-plt.plot(X, y, "bo", label="Data")
-plt.plot(X, predict(w, b, X), "r-", label="Fitted Line")
+plt.plot(x, y, "bo", label="Data")
+plt.plot(x, predict(w, b, x), "r-", label="Fitted Line")
plt.legend()
plt.title(f"Linear Regression: w = {w:.2f}, b = {b:.2f}")
plt.show()
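Here N803 (argument names should be lowercase) and N806 drive the X-to-x and N-to-n renames. If the uppercase design-matrix convention were preferred, a targeted suppression on each signature would work instead. A hypothetical alternative:

def loss_fn(w: float, b: float, X: jnp.ndarray, y: jnp.ndarray) -> float:  # noqa: N803
    predictions = predict(w, b, X)
    return jnp.mean((predictions - y) ** 2)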
@@ -1,5 +1,5 @@
import torch
-import torch.nn.functional as F
+import torch.nn.functional as F # noqa: N812
from torch import nn


8 changes: 4 additions & 4 deletions machine-learning/dali/src/main.py
@@ -60,8 +60,8 @@ def get_num_samples(data_path: Path) -> int:


def main() -> None:
-BATCH_SIZE: int = 2
-NUM_THREADS: int = 2
+batch_size: int = 2
+num_threads: int = 2

# Create data directory and download sample images
data_path = Path("data")
@@ -75,8 +75,8 @@ def main() -> None:

pipe = image_pipeline(
data_path=data_path,
-batch_size=BATCH_SIZE,
-num_threads=NUM_THREADS,
+batch_size=batch_size,
+num_threads=num_threads,
)
pipe.build()

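N806 applies only inside function bodies; module-level constants may stay UPPER_CASE. An equivalent refactor that keeps the constant-style names would hoist them to module scope. A sketch, not what this commit does:

BATCH_SIZE: int = 2  # module-level constants are exempt from N806
NUM_THREADS: int = 2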
6 changes: 3 additions & 3 deletions machine-learning/graph-neural-network/src/model/conv.py
@@ -1,5 +1,5 @@
import torch
-import torch.nn.functional as F
+import torch.nn.functional as F # noqa: N812
from ogb.graphproppred.mol_encoder import AtomEncoder, BondEncoder
from torch_geometric.nn import MessagePassing, global_add_pool
from torch_geometric.utils import degree
@@ -79,7 +79,7 @@ def update(self, aggr_out):


# GNN to generate node embedding
-class GNN_node(torch.nn.Module):
+class GNNNode(torch.nn.Module):
"""
Output:
node representations
@@ -162,7 +162,7 @@ def forward(self, batched_data):


# Virtual GNN to generate node embedding
-class GNN_node_Virtualnode(torch.nn.Module):
+class GNNNodeVirtualnode(torch.nn.Module):
"""
Output:
node representations
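These renames satisfy N801 (class names should use CapWords). If downstream code still imports the old names, temporary aliases would keep it working during migration. A hypothetical shim, not part of this commit (the alias names themselves may need their own naming suppressions):

GNN_node = GNNNode  # legacy alias; remove once callers migrate
GNN_node_Virtualnode = GNNNodeVirtualnode  # legacy alias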
@@ -6,9 +6,9 @@
packages_to_install=["torch==2.0.0", "torchvision==0.15.1", "lightning==2.0.5"],
)
def train():
-import lightning as L
+import lightning as L # noqa: N812
import torch
-import torch.nn.functional as F
+import torch.nn.functional as F # noqa: N812
import torchvision
from torch import nn
from torch.utils import data
@@ -1,7 +1,7 @@
-import lightning as L
+import lightning as L # noqa: N812
import mlflow
import torch
-import torch.nn.functional as F
+import torch.nn.functional as F # noqa: N812
import torchvision
from args import get_args
from lightning.pytorch.loggers.wandb import WandbLogger
14 changes: 7 additions & 7 deletions parallel-computing/hm-triton/src/main.py
@@ -13,18 +13,18 @@ def vector_add_kernel(
y_ptr, # Pointer to second vector
output_ptr, # Pointer to output vector
n_elements, # Number of elements in the vectors
-BLOCK_SIZE: tl.constexpr, # Number of elements each program should process
+block_size: tl.constexpr, # Number of elements each program should process
) -> None:
# Program ID
pid = tl.program_id(axis=0)

# Calculate the start index for this program instance
-block_start = pid * BLOCK_SIZE
+block_start = pid * block_size

# Create an offset array for this block
-offsets = block_start + tl.arange(0, BLOCK_SIZE)
+offsets = block_start + tl.arange(0, block_size)

-# Create a mask to handle the case where array size isn't multiple of BLOCK_SIZE
+# Create a mask to handle the case where array size isn't multiple of block_size
mask = offsets < n_elements

# Load data using the mask
@@ -48,18 +48,18 @@ def vector_add(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
output: torch.Tensor = torch.empty_like(x)

# Define block size (can be tuned for performance)
-BLOCK_SIZE: int = 128
+block_size: int = 128

# Calculate grid size
-grid: tuple[int, ...] = (triton.cdiv(n_elements, BLOCK_SIZE),)
+grid: tuple[int, ...] = (triton.cdiv(n_elements, block_size),)

# Launch kernel
vector_add_kernel[grid](
x, # Triton automatically converts tensor to pointer
y, # Triton automatically converts tensor to pointer
output, # Triton automatically converts tensor to pointer
n_elements,
-BLOCK_SIZE,
+block_size,
)

return output
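The BLOCK_SIZE-to-block_size renames satisfy N803 and N806; Triton still treats the kernel parameter as a compile-time constant through its tl.constexpr annotation regardless of case. A quick smoke test for vector_add, assuming a CUDA-capable device:

import torch

x = torch.rand(1024, device="cuda")
y = torch.rand(1024, device="cuda")
output = vector_add(x, y)
assert torch.allclose(output, x + y)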
