
Commit

Merge branch 'main' into add_coding_ddpg
vmoens committed Jun 13, 2023
2 parents ed57fca + 4edfd23 commit b878abd
Showing 9 changed files with 191 additions and 120 deletions.
2 changes: 1 addition & 1 deletion Makefile
@@ -82,7 +82,7 @@ download:
        tar $(TAROPTS) -xzf $(DATADIR)/UrbanSound8K.tar.gz -C ./beginner_source/data/

        # Download model for beginner_source/fgsm_tutorial.py
-       wget -nv -N https://s3.amazonaws.com/pytorch-tutorial-assets/lenet_mnist_model.pth -P $(DATADIR)
+       wget -nv -N 'https://docs.google.com/uc?export=download&id=1HJV2nUHJqclXQ8flKvcWmjZ-OU5DGatl' -O $(DATADIR)/lenet_mnist_model.pth
        cp $(DATADIR)/lenet_mnist_model.pth ./beginner_source/data/lenet_mnist_model.pth

        # Download model for advanced_source/dynamic_quantization_tutorial.py
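Side note on the new download rule (illustrative only; the commit itself just edits the Makefile): the docs.google.com uc?export=download endpoint serves small files such as this checkpoint directly, so a plain fetch suffices. A minimal Python sketch of the same download:

# Hypothetical Python equivalent of the new wget rule; assumes Google Drive
# serves this small file without a download-confirmation page.
import urllib.request

url = "https://docs.google.com/uc?export=download&id=1HJV2nUHJqclXQ8flKvcWmjZ-OU5DGatl"
urllib.request.urlretrieve(url, "lenet_mnist_model.pth")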
13 changes: 7 additions & 6 deletions advanced_source/neural_style_tutorial.py
@@ -14,7 +14,7 @@
developed by Leon A. Gatys, Alexander S. Ecker and Matthias Bethge.
Neural-Style, or Neural-Transfer, allows you to take an image and
reproduce it with a new artistic style. The algorithm takes three images,
-an input image, a content-image, and a style-image, and changes the input
+an input image, a content-image, and a style-image, and changes the input
to resemble the content of the content-image and the artistic style of the style-image.
@@ -70,6 +70,7 @@
# method is used to move tensors or modules to a desired device.

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+torch.set_default_device(device)
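
An aside on why the .to(device) calls below disappear (an illustration, not part of the diff): torch.set_default_device, added in PyTorch 2.0, makes factory functions such as torch.tensor and torch.randn, as well as newly constructed module parameters, allocate on the given device by default. A minimal sketch:

# Minimal sketch of torch.set_default_device (requires PyTorch >= 2.0).
import torch

torch.set_default_device("cuda" if torch.cuda.is_available() else "cpu")
t = torch.randn(3)   # allocated on the default device; no .to(device) needed
print(t.device)      # cuda:0 on a GPU machine, cpu otherwise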

######################################################################
# Loading the Images
@@ -261,7 +262,7 @@ def forward(self, input):
# network to evaluation mode using ``.eval()``.
#

-cnn = models.vgg19(pretrained=True).features.to(device).eval()
+cnn = models.vgg19(pretrained=True).features.eval()



@@ -271,8 +272,8 @@ def forward(self, input):
# We will use them to normalize the image before sending it into the network.
#

-cnn_normalization_mean = torch.tensor([0.485, 0.456, 0.406]).to(device)
-cnn_normalization_std = torch.tensor([0.229, 0.224, 0.225]).to(device)
+cnn_normalization_mean = torch.tensor([0.485, 0.456, 0.406])
+cnn_normalization_std = torch.tensor([0.229, 0.224, 0.225])

# create a module to normalize input image so we can easily put it in a
# ``nn.Sequential``
@@ -308,7 +309,7 @@ def get_style_model_and_losses(cnn, normalization_mean, normalization_std,
                               content_layers=content_layers_default,
                               style_layers=style_layers_default):
    # normalization module
-    normalization = Normalization(normalization_mean, normalization_std).to(device)
+    normalization = Normalization(normalization_mean, normalization_std)

    # just in order to have an iterable access to or list of content/style
    # losses
@@ -373,7 +374,7 @@ def get_style_model_and_losses(cnn, normalization_mean, normalization_std,
#
# ::
#
-#    input_img = torch.randn(content_img.data.size(), device=device)
+#    input_img = torch.randn(content_img.data.size())

# add the original input image to the figure:
plt.figure()
14 changes: 7 additions & 7 deletions beginner_source/examples_autograd/polynomial_autograd.py
@@ -18,23 +18,23 @@
import math

dtype = torch.float
device = torch.device("cpu")
# device = torch.device("cuda:0") # Uncomment this to run on GPU
device = "cuda" if torch.cuda.is_available() else "cpu"
torch.set_default_device(device)

# Create Tensors to hold input and outputs.
# By default, requires_grad=False, which indicates that we do not need to
# compute gradients with respect to these Tensors during the backward pass.
-x = torch.linspace(-math.pi, math.pi, 2000, device=device, dtype=dtype)
+x = torch.linspace(-math.pi, math.pi, 2000, dtype=dtype)
y = torch.sin(x)

# Create random Tensors for weights. For a third order polynomial, we need
# 4 weights: y = a + b x + c x^2 + d x^3
# Setting requires_grad=True indicates that we want to compute gradients with
# respect to these Tensors during the backward pass.
-a = torch.randn((), device=device, dtype=dtype, requires_grad=True)
-b = torch.randn((), device=device, dtype=dtype, requires_grad=True)
-c = torch.randn((), device=device, dtype=dtype, requires_grad=True)
-d = torch.randn((), device=device, dtype=dtype, requires_grad=True)
+a = torch.randn((), dtype=dtype, requires_grad=True)
+b = torch.randn((), dtype=dtype, requires_grad=True)
+c = torch.randn((), dtype=dtype, requires_grad=True)
+d = torch.randn((), dtype=dtype, requires_grad=True)

learning_rate = 1e-6
for t in range(2000):
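The loop body is collapsed in this diff. As a sketch of what one step of this tutorial presumably does (using x, y, a through d, and learning_rate from the visible lines; the exact body is not shown here):

    # Sketch of one training step; the real loop body is collapsed above.
    y_pred = a + b * x + c * x ** 2 + d * x ** 3   # forward pass
    loss = (y_pred - y).pow(2).sum()               # squared-error loss
    loss.backward()                                # fills a.grad ... d.grad
    with torch.no_grad():                          # manual SGD, outside autograd
        for p in (a, b, c, d):
            p -= learning_rate * p.grad
            p.grad = None                          # reset for the next step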
66 changes: 50 additions & 16 deletions beginner_source/fgsm_tutorial.py
@@ -123,7 +123,7 @@
# - ``pretrained_model`` - path to the pretrained MNIST model which was
# trained with
# `pytorch/examples/mnist <https://github.com/pytorch/examples/tree/master/mnist>`__.
-#    For simplicity, download the pretrained model `here <https://drive.google.com/drive/folders/1fn83DF14tWmit0RTKWRhPq5uVXt73e0h?usp=sharing>`__.
+#    For simplicity, download the pretrained model `here <https://drive.google.com/file/d/1HJV2nUHJqclXQ8flKvcWmjZ-OU5DGatl/view?usp=drive_link>`__.
#
# - ``use_cuda`` - boolean flag to use CUDA if desired and available.
# Note, a GPU with CUDA is not critical for this tutorial as a CPU will
@@ -154,26 +154,34 @@
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
-        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
-        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
-        self.conv2_drop = nn.Dropout2d()
-        self.fc1 = nn.Linear(320, 50)
-        self.fc2 = nn.Linear(50, 10)
+        self.conv1 = nn.Conv2d(1, 32, 3, 1)
+        self.conv2 = nn.Conv2d(32, 64, 3, 1)
+        self.dropout1 = nn.Dropout(0.25)
+        self.dropout2 = nn.Dropout(0.5)
+        self.fc1 = nn.Linear(9216, 128)
+        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
-        x = F.relu(F.max_pool2d(self.conv1(x), 2))
-        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
-        x = x.view(-1, 320)
-        x = F.relu(self.fc1(x))
-        x = F.dropout(x, training=self.training)
+        x = self.conv1(x)
+        x = F.relu(x)
+        x = self.conv2(x)
+        x = F.relu(x)
+        x = F.max_pool2d(x, 2)
+        x = self.dropout1(x)
+        x = torch.flatten(x, 1)
+        x = self.fc1(x)
+        x = F.relu(x)
+        x = self.dropout2(x)
        x = self.fc2(x)
-        return F.log_softmax(x, dim=1)
+        output = F.log_softmax(x, dim=1)
+        return output
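
Quick sanity check on the new architecture (illustrative, not part of the commit): a 1x28x28 MNIST input gives 32x26x26 after conv1 (3x3, stride 1), 64x24x24 after conv2, and 64x12x12 after the 2x2 max pool, so flattening yields 64*12*12 = 9216 features, matching nn.Linear(9216, 128).

# Shape check for the updated Net (assumes Net and torch from above).
net = Net().eval()
dummy = torch.zeros(1, 1, 28, 28)   # one fake MNIST image
print(net(dummy).shape)             # torch.Size([1, 10])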

# MNIST Test dataset and dataloader declaration
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=False, download=True, transform=transforms.Compose([
        transforms.ToTensor(),
-        ])),
+        transforms.Normalize((0.1307,), (0.3081,)),
+        ])),
    batch_size=1, shuffle=True)

# Define what device we are using
@@ -184,7 +192,7 @@ def forward(self, x):
model = Net().to(device)

# Load the pretrained model
-model.load_state_dict(torch.load(pretrained_model, weights_only=True, map_location='cpu'))
+model.load_state_dict(torch.load(pretrained_model, map_location=device))

# Set the model in evaluation mode. In this case this is for the Dropout layers
model.eval()
@@ -219,6 +227,26 @@ def fgsm_attack(image, epsilon, data_grad):
    # Return the perturbed image
    return perturbed_image
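
The body of fgsm_attack is collapsed above. For context, the standard FGSM rule it presumably implements (a sketch, not this diff's code) steps each pixel by epsilon in the direction of the gradient sign, then clamps back to the valid range:

# Sketch of the standard FGSM update; the tutorial's actual body is collapsed.
def fgsm_attack_sketch(image, epsilon, data_grad):
    perturbed = image + epsilon * data_grad.sign()  # signed gradient step
    return torch.clamp(perturbed, 0, 1)             # keep pixels in [0, 1]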

+# restores the tensors to their original scale
+def denorm(batch, mean=[0.1307], std=[0.3081]):
+    """
+    Convert a batch of tensors to their original scale.
+
+    Args:
+        batch (torch.Tensor): Batch of normalized tensors.
+        mean (torch.Tensor or list): Mean used for normalization.
+        std (torch.Tensor or list): Standard deviation used for normalization.
+
+    Returns:
+        torch.Tensor: batch of tensors without normalization applied to them.
+    """
+    if isinstance(mean, list):
+        mean = torch.tensor(mean).to(device)
+    if isinstance(std, list):
+        std = torch.tensor(std).to(device)
+
+    return batch * std.view(1, -1, 1, 1) + mean.view(1, -1, 1, 1)
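
Quick usage illustration (not part of the commit): denorm exactly inverts the Normalize((0.1307,), (0.3081,)) transform used by the dataloader, so a normalize/denorm round trip returns the original pixels. Assuming denorm from above and a CPU device:

# Round-trip check for denorm (assumes denorm and device are defined above).
norm = transforms.Normalize((0.1307,), (0.3081,))
img = torch.rand(4, 1, 28, 28)    # fake batch, pixel values in [0, 1]
print(torch.allclose(denorm(norm(img)), img, atol=1e-6))  # True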


######################################################################
# Testing Function
@@ -273,11 +301,17 @@ def test( model, device, test_loader, epsilon ):
        # Collect ``datagrad``
        data_grad = data.grad.data

+        # Restore the data to its original scale
+        data_denorm = denorm(data)
+
        # Call FGSM Attack
-        perturbed_data = fgsm_attack(data, epsilon, data_grad)
+        perturbed_data = fgsm_attack(data_denorm, epsilon, data_grad)

+        # Reapply normalization
+        perturbed_data_normalized = transforms.Normalize((0.1307,), (0.3081,))(perturbed_data)
+
        # Re-classify the perturbed image
-        output = model(perturbed_data)
+        output = model(perturbed_data_normalized)

        # Check for success
        final_pred = output.max(1, keepdim=True)[1]  # get the index of the max log-probability
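Why denormalize before attacking (an illustrative aside): epsilon is a budget in raw pixel space, where values live in [0, 1]. Applied to the normalized tensor instead, the same step would correspond to epsilon * std in pixel space, silently shrinking the perturbation:

# Effective pixel-space budget if epsilon were applied after Normalize.
eps, std = 0.1, 0.3081
print(eps * std)  # ~0.031, about a third of the intended 0.1 budget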
