Refactored test assertions that have suboptimal tests with numbers #7671

Merged 5 commits on Apr 23, 2024
Changes from all commits
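
The pattern throughout this PR is to replace bare boolean checks such as self.assertTrue(a == b) with the dedicated unittest assertion methods (assertEqual, assertGreater, assertGreaterEqual, assertLess). The test semantics are unchanged; the gain is in the failure output, where the dedicated methods report both operands instead of the uninformative "False is not true". A minimal standalone sketch of the idea (illustrative values, not code from this PR):

import unittest


class TestAssertionStyle(unittest.TestCase):
    def test_suboptimal(self):
        # On failure this reports only: AssertionError: False is not true
        self.assertTrue(len([1, 2, 3]) == 2)

    def test_refactored(self):
        # On failure this reports the operands: AssertionError: 3 != 2
        self.assertEqual(len([1, 2, 3]), 2)


if __name__ == "__main__":
    unittest.main()
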
2 changes: 1 addition & 1 deletion tests/test_bundle_get_data.py
@@ -53,7 +53,7 @@ def test_get_all_bundles_list(self, params):
output = get_all_bundles_list(**params)
self.assertTrue(isinstance(output, list))
self.assertTrue(isinstance(output[0], tuple))
- self.assertTrue(len(output[0]) == 2)
+ self.assertEqual(len(output[0]), 2)

@parameterized.expand([TEST_CASE_1, TEST_CASE_5])
@skip_if_quick
10 changes: 6 additions & 4 deletions tests/test_compute_regression_metrics.py
@@ -70,22 +70,24 @@ def test_shape_reduction(self):
mt = mt_fn(reduction="mean")
mt(in_tensor, in_tensor)
out_tensor = mt.aggregate()
- self.assertTrue(len(out_tensor.shape) == 1)
+ self.assertEqual(len(out_tensor.shape), 1)

mt = mt_fn(reduction="sum")
mt(in_tensor, in_tensor)
out_tensor = mt.aggregate()
- self.assertTrue(len(out_tensor.shape) == 0)
+ self.assertEqual(len(out_tensor.shape), 0)

mt = mt_fn(reduction="sum") # test reduction arg overriding
mt(in_tensor, in_tensor)
out_tensor = mt.aggregate(reduction="mean_channel")
- self.assertTrue(len(out_tensor.shape) == 1 and out_tensor.shape[0] == batch)
+ self.assertEqual(len(out_tensor.shape), 1)
+ self.assertEqual(out_tensor.shape[0], batch)

mt = mt_fn(reduction="sum_channel")
mt(in_tensor, in_tensor)
out_tensor = mt.aggregate()
- self.assertTrue(len(out_tensor.shape) == 1 and out_tensor.shape[0] == batch)
+ self.assertEqual(len(out_tensor.shape), 1)
+ self.assertEqual(out_tensor.shape[0], batch)

def test_compare_numpy(self):
set_determinism(seed=123)
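Where the original assertion combined two conditions with a single "and", as in test_shape_reduction above, the refactor splits it into one assertion per condition so a failure pinpoints which condition broke and prints its actual value. A small self-contained sketch of the before/after (the tensor shape here is made up for illustration):

import unittest

import torch


class TestSplitCompoundAssertion(unittest.TestCase):
    def test_split(self):
        batch = 4
        out_tensor = torch.zeros(batch)  # shape: (4,)
        # Old style: one opaque pass/fail covering both conditions.
        self.assertTrue(len(out_tensor.shape) == 1 and out_tensor.shape[0] == batch)
        # New style: each condition fails independently with its operands reported.
        self.assertEqual(len(out_tensor.shape), 1)
        self.assertEqual(out_tensor.shape[0], batch)


if __name__ == "__main__":
    unittest.main()
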
16 changes: 8 additions & 8 deletions tests/test_handler_stats.py
@@ -76,9 +76,9 @@ def _update_metric(engine):
if has_key_word.match(line):
content_count += 1
if epoch_log is True:
- self.assertTrue(content_count == max_epochs)
+ self.assertEqual(content_count, max_epochs)
else:
- self.assertTrue(content_count == 2) # 2 = len([1, 2]) from event_filter
+ self.assertEqual(content_count, 2) # 2 = len([1, 2]) from event_filter

@parameterized.expand([[True], [get_event_filter([1, 3])]])
def test_loss_print(self, iteration_log):
@@ -116,9 +116,9 @@ def _train_func(engine, batch):
if has_key_word.match(line):
content_count += 1
if iteration_log is True:
- self.assertTrue(content_count == num_iters * max_epochs)
+ self.assertEqual(content_count, num_iters * max_epochs)
else:
- self.assertTrue(content_count == 2) # 2 = len([1, 3]) from event_filter
+ self.assertEqual(content_count, 2) # 2 = len([1, 3]) from event_filter

def test_loss_dict(self):
log_stream = StringIO()
@@ -150,7 +150,7 @@ def _train_func(engine, batch):
for line in output_str.split("\n"):
if has_key_word.match(line):
content_count += 1
- self.assertTrue(content_count > 0)
+ self.assertGreater(content_count, 0)

def test_loss_file(self):
key_to_handler = "test_logging"
@@ -184,7 +184,7 @@ def _train_func(engine, batch):
for line in output_str.split("\n"):
if has_key_word.match(line):
content_count += 1
- self.assertTrue(content_count > 0)
+ self.assertGreater(content_count, 0)

def test_exception(self):
# set up engine
@@ -239,7 +239,7 @@ def _update_metric(engine):
for line in output_str.split("\n"):
if has_key_word.match(line):
content_count += 1
- self.assertTrue(content_count > 0)
+ self.assertGreater(content_count, 0)

def test_default_logger(self):
log_stream = StringIO()
@@ -274,7 +274,7 @@ def _train_func(engine, batch):
for line in output_str.split("\n"):
if has_key_word.match(line):
content_count += 1
- self.assertTrue(content_count > 0)
+ self.assertGreater(content_count, 0)


if __name__ == "__main__":
2 changes: 1 addition & 1 deletion tests/test_invertd.py
@@ -134,7 +134,7 @@ def test_invert(self):
# 25300: 2 workers (cpu, non-macos)
# 1812: 0 workers (gpu or macos)
# 1821: windows torch 1.10.0
- self.assertTrue((reverted.size - n_good) < 40000, f"diff. {reverted.size - n_good}")
+ self.assertLess((reverted.size - n_good), 40000, f"diff. {reverted.size - n_good}")

set_determinism(seed=None)

4 changes: 2 additions & 2 deletions tests/test_load_spacing_orientation.py
@@ -48,7 +48,7 @@ def test_load_spacingd(self, filename):
ref = resample_to_output(anat, (1, 0.2, 1), order=1)
t2 = time.time()
print(f"time scipy: {t2 - t1}")
- self.assertTrue(t2 >= t1)
+ self.assertGreaterEqual(t2, t1)
np.testing.assert_allclose(res_dict["image"].affine, ref.affine)
np.testing.assert_allclose(res_dict["image"].shape[1:], ref.shape)
np.testing.assert_allclose(ref.get_fdata(), res_dict["image"][0], atol=0.05)
@@ -68,7 +68,7 @@ def test_load_spacingd_rotate(self, filename):
ref = resample_to_output(anat, (1, 2, 3), order=1)
t2 = time.time()
print(f"time scipy: {t2 - t1}")
- self.assertTrue(t2 >= t1)
+ self.assertGreaterEqual(t2, t1)
np.testing.assert_allclose(res_dict["image"].affine, ref.affine)
if "anatomical" not in filename:
np.testing.assert_allclose(res_dict["image"].shape[1:], ref.shape)
4 changes: 2 additions & 2 deletions tests/test_meta_affine.py
@@ -160,7 +160,7 @@ def test_linear_consistent(self, xform_cls, input_dict, atol):
diff = np.abs(itk.GetArrayFromImage(ref_2) - itk.GetArrayFromImage(expected))
avg_diff = np.mean(diff)

- self.assertTrue(avg_diff < atol, f"{xform_cls} avg_diff: {avg_diff}, tol: {atol}")
+ self.assertLess(avg_diff, atol, f"{xform_cls} avg_diff: {avg_diff}, tol: {atol}")

@parameterized.expand(TEST_CASES_DICT)
def test_linear_consistent_dict(self, xform_cls, input_dict, atol):
@@ -175,7 +175,7 @@ def test_linear_consistent_dict(self, xform_cls, input_dict, atol):
diff = {k: np.abs(itk.GetArrayFromImage(ref_2[k]) - itk.GetArrayFromImage(expected[k])) for k in keys}
avg_diff = {k: np.mean(diff[k]) for k in keys}
for k in keys:
- self.assertTrue(avg_diff[k] < atol, f"{xform_cls} avg_diff: {avg_diff}, tol: {atol}")
+ self.assertLess(avg_diff[k], atol, f"{xform_cls} avg_diff: {avg_diff}, tol: {atol}")


if __name__ == "__main__":
2 changes: 1 addition & 1 deletion tests/test_persistentdataset.py
@@ -165,7 +165,7 @@ def test_different_transforms(self):
im1 = PersistentDataset([im], Identity(), cache_dir=path, hash_transform=json_hashing)[0]
im2 = PersistentDataset([im], Flip(1), cache_dir=path, hash_transform=json_hashing)[0]
l2 = ((im1 - im2) ** 2).sum() ** 0.5
- self.assertTrue(l2 > 1)
+ self.assertGreater(l2, 1)


if __name__ == "__main__":
2 changes: 1 addition & 1 deletion tests/test_rand_weighted_cropd.py
@@ -154,7 +154,7 @@ def test_rand_weighted_cropd(self, _, init_params, input_data, expected_shape, e
crop = RandWeightedCropd(**init_params)
crop.set_random_state(10)
result = crop(input_data)
- self.assertTrue(len(result) == init_params["num_samples"])
+ self.assertEqual(len(result), init_params["num_samples"])
_len = len(tuple(input_data.keys()))
self.assertTupleEqual(tuple(result[0].keys())[:_len], tuple(input_data.keys()))

2 changes: 1 addition & 1 deletion tests/test_recon_net_utils.py
@@ -64,7 +64,7 @@ def test_reshape_channel_complex(self, test_data):
def test_complex_normalize(self, test_data):
result, mean, std = complex_normalize(test_data)
result = result * std + mean
- self.assertTrue((((result - test_data) ** 2).mean() ** 0.5).item() < 1e-5)
+ self.assertLess((((result - test_data) ** 2).mean() ** 0.5).item(), 1e-5)

@parameterized.expand(TEST_PAD)
def test_pad(self, test_data):
2 changes: 1 addition & 1 deletion tests/test_reg_loss_integration.py
@@ -99,7 +99,7 @@ def forward(self, x):
# backward pass
loss_val.backward()
optimizer.step()
- self.assertTrue(init_loss > loss_val, "loss did not decrease")
+ self.assertGreater(init_loss, loss_val, "loss did not decrease")


if __name__ == "__main__":
4 changes: 2 additions & 2 deletions tests/test_sobel_gradient.py
@@ -164,8 +164,8 @@ def test_sobel_gradients(self, image, arguments, expected_grad):
)
def test_sobel_kernels(self, arguments, expected_kernels):
sobel = SobelGradients(**arguments)
- self.assertTrue(sobel.kernel_diff.dtype == expected_kernels[0].dtype)
- self.assertTrue(sobel.kernel_smooth.dtype == expected_kernels[0].dtype)
+ self.assertEqual(sobel.kernel_diff.dtype, expected_kernels[0].dtype)
+ self.assertEqual(sobel.kernel_smooth.dtype, expected_kernels[0].dtype)
assert_allclose(sobel.kernel_diff, expected_kernels[0])
assert_allclose(sobel.kernel_smooth, expected_kernels[1])

4 changes: 2 additions & 2 deletions tests/test_sobel_gradientd.py
@@ -187,8 +187,8 @@ def test_sobel_gradients(self, image_dict, arguments, expected_grad):
)
def test_sobel_kernels(self, arguments, expected_kernels):
sobel = SobelGradientsd(**arguments)
- self.assertTrue(sobel.kernel_diff.dtype == expected_kernels[0].dtype)
- self.assertTrue(sobel.kernel_smooth.dtype == expected_kernels[0].dtype)
+ self.assertEqual(sobel.kernel_diff.dtype, expected_kernels[0].dtype)
+ self.assertEqual(sobel.kernel_smooth.dtype, expected_kernels[0].dtype)
assert_allclose(sobel.kernel_diff, expected_kernels[0])
assert_allclose(sobel.kernel_smooth, expected_kernels[1])

2 changes: 1 addition & 1 deletion tests/test_threadcontainer.py
@@ -62,7 +62,7 @@ def test_container(self):

self.assertTrue(con.is_alive)
self.assertIsNotNone(con.status())
- self.assertTrue(len(con.status_dict) > 0)
+ self.assertGreater(len(con.status_dict), 0)

con.join()

2 changes: 1 addition & 1 deletion tests/test_warp.py
@@ -124,7 +124,7 @@ def test_itk_benchmark(self):
relative_diff = np.mean(
np.divide(monai_result - itk_result, itk_result, out=np.zeros_like(itk_result), where=(itk_result != 0))
)
- self.assertTrue(relative_diff < 0.01)
+ self.assertLess(relative_diff, 0.01)

@parameterized.expand(TEST_CASES, skip_on_empty=True)
def test_resample(self, input_param, input_data, expected_val):