Commit

refactor for codeQl fix

Signed-off-by: GiulioZizzo <giulio.zizzo@yahoo.co.uk>
GiulioZizzo committed Dec 1, 2023
1 parent 5a92140 commit 105c881
Showing 2 changed files with 35 additions and 10 deletions.
28 changes: 20 additions & 8 deletions art/experimental/attacks/evasion/fast_gradient.py
@@ -20,7 +20,7 @@
 """
 import copy
 from collections import UserDict
-from typing import Optional, Union, TYPE_CHECKING
+from typing import List, Optional, Union, TYPE_CHECKING
 
 import numpy as np
 
@@ -198,7 +198,10 @@ def _minimal_perturbation(self, x: np.ndarray, y: np.ndarray, mask: np.ndarray)
                 current_x = self._apply_perturbation(x[batch_index_1:batch_index_2], perturbation, current_eps)
 
                 # Update
-                adv_x[active_indices] = current_x[active_indices]
+                if isinstance(adv_x, HuggingFaceMultiModalInput):
+                    adv_x[active_indices] = current_x[active_indices]
+                else:
+                    raise ValueError("Compatibility supported for HF style inputs")
 
                 adv_preds = self.estimator.predict(adv_x[batch_index_1:batch_index_2])
                 # If targeted active check to see whether we have hit the target, otherwise head to anything but

[Check failure on the added line adv_x[active_indices] = current_x[active_indices]. Code scanning / CodeQL: "Modification of parameter with default" (Error): This expression mutates a default value.]
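For context on the annotation above: CodeQL's "Modification of parameter with default" alert fires when a function mutates an argument whose default value is a mutable object, so the shared default carries the change into every later call. A minimal, self-contained sketch of the pattern (hypothetical names, not code from this repository):

def append_item(item, bucket=[]):  # mutable default: one list shared across calls
    bucket.append(item)  # CodeQL flags this: it mutates the parameter's default value
    return bucket


print(append_item(1))  # [1]
print(append_item(2))  # [1, 2] -- the shared default was mutated by the first call


def append_item_fixed(item, bucket=None):  # common fix: None sentinel, fresh list per call
    if bucket is None:
        bucket = []
    bucket.append(item)
    return bucket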
@@ -274,7 +277,10 @@ def _compute(
         import torch
         batch_eps: Union[int, float, np.ndarray]
         batch_eps_step: Union[int, float, np.ndarray]
-        original_type = x['pixel_values'].dtype
+        if isinstance(x, HuggingFaceMultiModalInput):
+            original_type = x['pixel_values'].dtype
+        else:
+            original_type = x.dtype
 
         if random_init:
             n = x.shape[0]
@@ -294,7 +300,8 @@ def _compute(
             x_adv = x.astype(ART_NUMPY_DTYPE)
 
         # Compute perturbation with implicit batching
-        x_adv_result = []
+        x_adv_result_list: List[torch.Tensor] = []
+        x_adv_np_result_list: List[np.ndarray] = []
         for batch_id in range(int(np.ceil(x.shape[0] / float(self.batch_size)))):
             if batch_id_ext is None:
                 self._batch_id = batch_id
@@ -355,7 +362,12 @@ def _compute(
                         x_adv_batch - x_init[batch_index_1:batch_index_2], batch_eps, self.norm
                     )
                     x_adv_batch = x_init[batch_index_1:batch_index_2] + perturbation
-            x_adv_result.append(x_adv_batch['pixel_values'])
-
-        x_adv_result = torch.concatenate(x_adv_result)
-        return x_adv.update_pixels(x_adv_result)
+            if isinstance(x_adv, HuggingFaceMultiModalInput):
+                x_adv_result_list.append(x_adv_batch['pixel_values'])
+            if isinstance(x_adv, np.ndarray):
+                x_adv_np_result_list.append(x_adv_batch)
+
+        if isinstance(original_type, str) or isinstance(original_type, torch.dtype):
+            x_adv_result = torch.concatenate(x_adv_result_list).type(original_type)
+            return x_adv.update_pixels(x_adv_result)  # type: ignore
+        return np.concatenate(x_adv_np_result_list)
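As a rough sketch of what the new tail of _compute does, under the assumption that each per-batch result is of a single type: tensor batches from HuggingFace-style inputs are concatenated and cast back to the original pixel dtype, while plain NumPy batches are concatenated directly. The helper name and test values below are illustrative, not taken from the file:

import numpy as np
import torch


def reassemble_batches(batches, original_type):
    # Concatenate per-batch adversarial outputs and restore the original dtype.
    if isinstance(batches[0], torch.Tensor):
        # Tensor path, mirroring torch.concatenate(...).type(original_type) above
        return torch.concatenate(batches).type(original_type)
    # NumPy path: plain concatenation along the batch axis
    return np.concatenate(batches)


tensor_batches = [torch.ones(2, 3, dtype=torch.float32), torch.zeros(1, 3, dtype=torch.float32)]
print(reassemble_batches(tensor_batches, torch.float16).dtype)  # torch.float16

numpy_batches = [np.ones((2, 3)), np.zeros((1, 3))]
print(reassemble_batches(numpy_batches, np.float32).shape)  # (3, 3)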
17 changes: 15 additions & 2 deletions (second changed file)
@@ -184,8 +184,21 @@ def __len__(self) -> int:
         pixel_values = UserDict.__getitem__(self, "pixel_values")
         return len(pixel_values)
 
-    def update_pixels(self, pixel_values: torch.Tensor) -> None:
-        super().__setitem__("pixel_values", pixel_values)
+    def update_pixels(self, updated_pixel_values: torch.Tensor,
+                      indices: Optional[np.ndarray] = None) -> HuggingFaceMultiModalInput:
+        """
+        Helper method to set pixel values.
+        :param updated_pixel_values: Pixel values to set.
+        :param indices: Optional indices used to update only part of the pixel values.
+        """
+        if indices is None:
+            super().__setitem__("pixel_values", updated_pixel_values)
+        else:
+            indices_list = indices.tolist()
+            pixel_values = UserDict.__getitem__(self, "pixel_values")
+            pixel_values[indices_list] = updated_pixel_values[indices_list]
+            super().__setitem__("pixel_values", pixel_values)
+        return self
 
     def reshape(self, new_shape: Tuple) -> HuggingFaceMultiModalInput:
         """
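A short usage sketch of the extended update_pixels behaviour, written against a minimal stand-in class rather than the real HuggingFaceMultiModalInput; the stand-in only reproduces the logic shown in the diff above:

from collections import UserDict

import numpy as np
import torch


class FakeMMInput(UserDict):
    # Minimal stand-in mirroring the update_pixels logic above; not the real class.
    def update_pixels(self, updated_pixel_values, indices=None):
        if indices is None:
            self["pixel_values"] = updated_pixel_values
        else:
            indices_list = indices.tolist()
            pixel_values = self["pixel_values"]
            pixel_values[indices_list] = updated_pixel_values[indices_list]
            self["pixel_values"] = pixel_values
        return self


inputs = FakeMMInput({"pixel_values": torch.zeros(4, 3, 8, 8)})
inputs.update_pixels(torch.ones(4, 3, 8, 8))  # full replacement
inputs.update_pixels(torch.full((4, 3, 8, 8), 2.0), indices=np.array([0, 2]))  # partial update
print(inputs["pixel_values"][:, 0, 0, 0])  # tensor([2., 1., 2., 1.])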
