
Tidy code
ElliottKasoar committed Sep 28, 2023
1 parent 5e33767 commit 46bbc78
Showing 4 changed files with 62 additions and 52 deletions.
examples/2_ResNet18/pt2ts.py (4 changes: 2 additions & 2 deletions)
@@ -76,14 +76,14 @@ def load_torchscript(filename: Optional[str] = "saved_model.pt") -> torch.nn.Mod
# Load model and prepare for saving
# =====================================================

-wp_torch = torch.float32
+precision = torch.float32

# FPTLIB-TODO
# Load a pre-trained PyTorch model
# Insert code here to load your model as `trained_model`.
# This example assumes my_ml_model has a method `initialize` to load
# architecture, weights, and place in inference mode
-trained_model = resnet18.initialize(wp_torch)
+trained_model = resnet18.initialize(precision)

# Switch off specific layers/parts of the model that behave
# differently during training and inference.
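For context, a minimal sketch of how the renamed `precision` variable flows through pt2ts.py when preparing the model for TorchScript export. The `resnet18` module is the example file updated below; the scripting and saving calls are shown only as one plausible route, since the corresponding lines of pt2ts.py are not part of this hunk:

    import torch

    import resnet18  # the example module updated in this commit

    # Working precision for the exported model; torch.float64 should also work
    precision = torch.float32

    # Load the pre-trained ResNet-18 in inference mode at the chosen precision
    trained_model = resnet18.initialize(precision)
    trained_model.eval()

    # One plausible export route (the elided parts of pt2ts.py may trace instead)
    scripted_model = torch.jit.script(trained_model)
    scripted_model.save("saved_resnet18_model_cpu.pt")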
examples/2_ResNet18/resnet18.py (61 changes: 41 additions & 20 deletions)
@@ -7,14 +7,14 @@


# Initialize everything
-def initialize(wp: type) -> torch.nn.Module:
+def initialize(precision: torch.dtype) -> torch.nn.Module:
"""
Download pre-trained ResNet-18 model and prepare for inference.
Parameters
----------
-wp: type
-Data type of input tensor.
+precision: torch.dtype
+Sets the working precision of the model.
Returns
-------
@@ -23,7 +23,7 @@ def initialize(wp: type) -> torch.nn.Module:
"""

# Set working precision
-torch.set_default_dtype(wp)
+torch.set_default_dtype(precision)

# Load a pre-trained PyTorch model
print("Loading pre-trained ResNet-18 model...", end="")
@@ -37,16 +37,16 @@ def initialize(wp: type) -> torch.nn.Module:
return model


-def run_model(model: torch.nn.Module, wp: type):
+def run_model(model: torch.nn.Module, precision: type) -> None:
"""
Run the pre-trained ResNet-18 with an example image of a dog.
Parameters
----------
model: torch.nn.Module
Pretrained model to run.
-wp: type
-Data type to save input tensor.
+precision: type
+NumPy data type to save input tensor.
"""
# Transform image into the form expected by the pre-trained model, using the mean
# and standard deviation from the ImageNet dataset
@@ -68,12 +68,20 @@ def run_model(model: torch.nn.Module, wp: type):

print("Saving input batch...", end="")
# Transpose input before saving so order consistent with Fortran
-np_input = np.array(input_batch.numpy().transpose().flatten(), dtype=wp)
+np_input = np.array(
+input_batch.numpy().transpose().flatten(), dtype=precision
+)  # type: np.typing.NDArray
+
+# Save data as binary
np_input.tofile("data/image_tensor.dat")

-# Check saved correctly
+# Load saved data to check it was saved correctly
+np_data = np.fromfile(
+"data/image_tensor.dat", dtype=precision
+)  # type: np.typing.NDArray
+
+# Reshape to original tensor shape
tensor_shape = np.array(input_batch.numpy()).transpose().shape
np_data = np.fromfile("data/image_tensor.dat", dtype=wp)
np_data = np_data.reshape(tensor_shape)
np_data = np_data.transpose()
assert np.array_equal(np_data, input_batch.numpy()) is True
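The save-and-check block above depends on transposing before flattening so that the element order on disk matches Fortran's column-major layout. A self-contained sketch of that round trip, with an illustrative file name and a random batch standing in for the preprocessed image:

    import numpy as np
    import torch

    precision = np.float32
    input_batch = torch.rand(1, 3, 224, 224)

    # Transpose before flattening so the on-disk order is column-major (Fortran-style)
    np_input = np.array(input_batch.numpy().transpose().flatten(), dtype=precision)
    np_input.tofile("image_tensor.dat")

    # Read back, reshape to the transposed shape, then transpose again to recover
    tensor_shape = input_batch.numpy().transpose().shape
    np_data = np.fromfile("image_tensor.dat", dtype=precision).reshape(tensor_shape)
    assert np.array_equal(np_data.transpose(), input_batch.numpy())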
@@ -84,6 +92,17 @@ def run_model(model: torch.nn.Module, wp: type):
output = model(input_batch)
print("done.")

+print_top_results(output)
+
+
+def print_top_results(output: torch.Tensor) -> None:
+"""Prints top 5 results
+Parameters
+----------
+output: torch.Tensor
+Output from ResNet-18.
+"""
# Run a softmax to get probabilities
probabilities = torch.nn.functional.softmax(output[0], dim=0)

@@ -95,19 +114,21 @@ def run_model(model: torch.nn.Module, wp: type):
top5_prob, top5_catid = torch.topk(probabilities, 5)
print("\nTop 5 results:\n")
for i in range(top5_prob.size(0)):
-id = top5_catid[i]
-print(f"{categories[id]} (id={id}): probability = {top5_prob[i].item()}")
+cat_id = top5_catid[i]
+print(
+f"{categories[cat_id]} (id={cat_id}): probability = {top5_prob[i].item()}"
+)


if __name__ == "__main__":
-wp = np.float32
+np_precision = np.float32

-if wp == np.float32:
-wp_torch = torch.float32
-elif wp == np.float64:
-wp_torch = torch.float64
+if np_precision == np.float32:
+torch_precision = torch.float32
+elif np_precision == np.float64:
+torch_precision = torch.float64
else:
-raise ValueError("`wp` must be of type `np.float32` or `np.float64`")
+raise ValueError("`np_precision` must be of type `np.float32` or `np.float64`")

-rn_model = initialize(wp_torch)
-run_model(rn_model, wp)
+rn_model = initialize(torch_precision)
+run_model(rn_model, np_precision)
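The `__main__` block above keeps a NumPy dtype and a Torch dtype in step by hand. The same mapping can be expressed with a small lookup table; this helper is only a sketch and is not part of the example:

    import numpy as np
    import torch

    # Map the NumPy working precision onto the corresponding Torch dtype
    NP_TO_TORCH = {np.float32: torch.float32, np.float64: torch.float64}

    np_precision = np.float32
    try:
        torch_precision = NP_TO_TORCH[np_precision]
    except KeyError:
        raise ValueError("`np_precision` must be `np.float32` or `np.float64`") from None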
examples/2_ResNet18/resnet_infer_python.py (32 changes: 4 additions & 28 deletions)
@@ -2,6 +2,7 @@

import numpy as np
import torch
+from resnet18 import print_top_results


def deploy(saved_model: str, device: str, batch_size: int = 1) -> torch.Tensor:
@@ -22,7 +23,7 @@ def deploy(saved_model: str, device: str, batch_size: int = 1) -> torch.Tensor:
output : torch.Tensor
result of running inference on model with Tensor of ones
"""
-transposed_shape = [224, 224, 3, 1]
+transposed_shape = [224, 224, 3, batch_size]
precision = np.float32

np_data = np.fromfile("data/image_tensor.dat", dtype=precision)
@@ -48,31 +49,6 @@ def deploy(saved_model: str, device: str, batch_size: int = 1) -> torch.Tensor:
return output


-def print_top_results(output: torch.Tensor) -> None:
-"""Prints top 5 results
-Parameters
-----------
-output: torch.Tensor
-Output from ResNet-18.
-"""
-# Run a softmax to get probabilities
-probabilities = torch.nn.functional.softmax(output[0], dim=0)
-
-# Read ImageNet labels from text file
-cats_filename = "data/categories.txt"
-categories = np.genfromtxt(cats_filename, dtype=str, delimiter="\n")
-
-# Show top categories per image
-top5_prob, top5_catid = torch.topk(probabilities, 5)
-print("\nTop 5 results:\n")
-for i in range(top5_prob.size(0)):
-cat_id = top5_catid[i]
-print(
-f"{categories[cat_id]} (id={cat_id}): probability = {top5_prob[i].item()}"
-)
-
-
if __name__ == "__main__":
saved_model_file = "saved_resnet18_model_cpu.pt"

@@ -81,5 +57,5 @@ def print_top_results(output: torch.Tensor) -> None:

batch_size_to_run = 1

-output = deploy(saved_model_file, device_to_run, batch_size_to_run)
-print_top_results(output)
+result = deploy(saved_model_file, device_to_run, batch_size_to_run)
+print_top_results(result)
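The `transposed_shape` change above lets the trailing dimension follow `batch_size` rather than being hard-coded to 1. The elided body of `deploy` is assumed to reshape and transpose the Fortran-ordered file back into an (N, C, H, W) batch, roughly as sketched here with illustrative names:

    import numpy as np
    import torch

    batch_size = 1
    transposed_shape = [224, 224, 3, batch_size]
    precision = np.float32

    # Read the column-major data written by resnet18.py and restore (N, C, H, W) order
    np_data = np.fromfile("data/image_tensor.dat", dtype=precision)
    np_data = np_data.reshape(transposed_shape)
    np_data = np_data.transpose()
    input_batch = torch.from_numpy(np_data)
    assert input_batch.shape == (batch_size, 3, 224, 224)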
examples/n_c_and_cpp/resnet_infer_python.py (17 changes: 15 additions & 2 deletions)
@@ -1,6 +1,7 @@
"""Load ResNet-18 saved to TorchScript and run inference with ones."""

import torch
+from PIL import Image


def deploy(saved_model, device, batch_size=1):
@@ -22,7 +23,20 @@ def deploy(saved_model, device, batch_size=1):
result of running inference on model with Tensor of ones
"""

-input_tensor = torch.ones(batch_size, 3, 224, 224)
+image_filename = "data/dog.jpg"
+input_image = Image.open(image_filename)
+preprocess = torchvision.transforms.Compose(
+[
+torchvision.transforms.Resize(256),
+torchvision.transforms.CenterCrop(224),
+torchvision.transforms.ToTensor(),
+torchvision.transforms.Normalize(
+mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
+),
+]
+)
+input_tensor = preprocess(input_image)
+input_batch = input_tensor.unsqueeze(0)

if device == "cpu":
# Load saved TorchScript model
@@ -43,7 +57,6 @@ def deploy(saved_model, device, batch_size=1):


if __name__ == "__main__":

saved_model_file = "saved_resnet18_model_cpu.pt"

device_to_run = "cpu"
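One caution on the hunk above: the added preprocessing calls `torchvision.transforms`, but the only new import in this diff is `from PIL import Image`, so `torchvision` must also be imported for the file to run. A minimal, self-contained version of the added code, with both imports and the TorchScript model loaded on CPU as elsewhere in this example:

    import torch
    import torchvision
    from PIL import Image

    # Load the example image and apply the standard ImageNet preprocessing
    input_image = Image.open("data/dog.jpg")
    preprocess = torchvision.transforms.Compose(
        [
            torchvision.transforms.Resize(256),
            torchvision.transforms.CenterCrop(224),
            torchvision.transforms.ToTensor(),
            torchvision.transforms.Normalize(
                mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
            ),
        ]
    )
    input_batch = preprocess(input_image).unsqueeze(0)  # add the batch dimension

    # Run the saved TorchScript model
    model = torch.jit.load("saved_resnet18_model_cpu.pt")
    model.eval()
    with torch.no_grad():
        output = model(input_batch)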
