-
Notifications
You must be signed in to change notification settings - Fork 0
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Loading branch information
1 parent
fe3976f
commit 5bd0601
Showing
24 changed files
with
2,006 additions
and
6 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.
Oops, something went wrong.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Empty file.
Empty file.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,127 @@ | ||
from argparse import Namespace | ||
from pathlib import Path | ||
from unittest.mock import MagicMock, patch | ||
|
||
import pytest | ||
|
||
from cli_tool_audit.__main__ import handle_create, reduce_args_tool_cli_tool_config_args | ||
|
||
|
||
@pytest.mark.parametrize(
    "input_args, expected_output",
    [
        # Every recognized config field present.
        (
            Namespace(tool="example_tool", version="1.0.0", version_switch="latest", schema="semver", if_os="linux"),
            {"version": "1.0.0", "version_switch": "latest", "schema": "semver", "if_os": "linux"},
        ),
        # Recognized fields mixed with an unrecognized one.
        (Namespace(tool="example_tool", version="1.0.0", irrelevant="ignore_this"), {"version": "1.0.0"}),
        # No recognized fields at all.
        (Namespace(irrelevant="ignore_this"), {}),
    ],
)
def test_reduce_args_tool_cli_tool_config_args(input_args, expected_output):
    """
    Verify that reduce_args_tool_cli_tool_config_args keeps only config-relevant fields.

    Args:
        input_args: Namespace simulating parsed command-line arguments.
        expected_output: Dictionary the reduction is expected to produce.
    """
    # The reduced namespace must match the expected dict exactly.
    assert reduce_args_tool_cli_tool_config_args(input_args) == expected_output
|
||
|
||
@pytest.mark.parametrize(
    "exception, expected_message",
    [
        (ValueError("Tool already exists."), "Tool already exists."),
        (Exception("Unexpected error occurred."), "Unexpected error occurred."),
    ],
)
def test_handle_create_with_exceptions(exception, expected_message):
    """
    Test that handle_create propagates errors raised by ConfigManager.create_tool_config.

    Args:
        exception: Exception instance raised by the mocked create_tool_config.
        expected_message: Message the propagated exception is expected to carry.
    """
    # Arrange: a ConfigManager whose create_tool_config always fails.
    mock_config_manager = MagicMock()
    mock_config_manager.create_tool_config.side_effect = exception

    # Route handle_create's ConfigManager construction to the mocked instance.
    with patch("cli_tool_audit.__main__.config_manager.ConfigManager", return_value=mock_config_manager):
        # Create an argparse Namespace that simulates command line arguments.
        args = Namespace(tool="example_tool", config=str(Path("mock_config.toml")))

        # Act & Assert: the exception must propagate with its message intact.
        # The original else-branch (try/except with an assert inside `except`)
        # passed silently when no exception was raised; asserting on the exact
        # exception type with pytest.raises closes that gap for both cases.
        with pytest.raises(type(exception), match=expected_message):
            handle_create(args)
|
||
|
||
@pytest.fixture
def mock_config_manager():
    """Yield a mocked ConfigManager instance patched into cli_tool_audit.__main__."""
    patch_target = "cli_tool_audit.__main__.config_manager.ConfigManager"
    with patch(patch_target) as mock_class:
        # Tests interact with the instance the patched class would construct.
        yield mock_class.return_value
|
||
|
||
def test_handle_create_success(mock_config_manager):
    """Happy path: handle_create forwards the tool name and config data to ConfigManager."""
    # Arrange
    tool_name = "example_tool"
    config_data = {"version": "1.0.0"}
    mock_config_manager.create_tool_config.return_value = None
    args = Namespace(tool=tool_name, config=str(Path("mock_config.toml")), **config_data)

    # Act
    handle_create(args)

    # Assert: exactly one create call, carrying the reduced config dict.
    mock_config_manager.create_tool_config.assert_called_once_with(tool_name, config_data)
|
||
|
||
@pytest.mark.parametrize("existing_tool, expected_message", [("example_tool", "Tool already exists.")])
def test_handle_create_tool_exists(mock_config_manager, existing_tool, expected_message):
    """Edge case: creating a tool whose configuration already exists raises ValueError."""
    # Arrange: the manager rejects the duplicate tool.
    mock_config_manager.create_tool_config.side_effect = ValueError(expected_message)
    args = Namespace(tool=existing_tool, config=str(Path("mock_config.toml")), version="1.0.0")

    # Act & Assert: the ValueError must bubble up to the caller.
    with pytest.raises(ValueError, match=expected_message):
        handle_create(args)
|
||
|
||
def test_handle_create_unexpected_error(mock_config_manager):
    """Error condition: an unexpected failure in create_tool_config propagates unchanged."""
    # Arrange
    mock_config_manager.create_tool_config.side_effect = Exception("Unexpected error occurred.")
    args = Namespace(tool="another_tool", config=str(Path("mock_config.toml")), version="1.0.0")

    # Act: capture whatever escapes handle_create.
    with pytest.raises(Exception) as exc_info:
        handle_create(args)

    # Assert: the original message survives propagation.
    assert str(exc_info.value) == "Unexpected error occurred."
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,139 @@ | ||
import json | ||
from unittest.mock import MagicMock, patch | ||
|
||
import pytest | ||
|
||
from cli_tool_audit.audit_cache import AuditFacade | ||
from cli_tool_audit.json_utils import custom_json_serializer | ||
from cli_tool_audit.models import CliToolConfig, ToolCheckResult | ||
|
||
|
||
@pytest.fixture
def fake_tool_config():
    """Return a mocked CliToolConfig with a fixed name and cache hash."""
    mock_config = MagicMock(spec=CliToolConfig)
    mock_config.name = "test_tool"
    # Stable hash so every test resolves to the same cache filename.
    mock_config.cache_hash.return_value = "dummy_hash"
    return mock_config
|
||
|
||
@pytest.fixture
def audit_facade(tmp_path):
    """Return an AuditFacade whose cache lives in a per-test temporary directory."""
    facade = AuditFacade(cache_dir=tmp_path)
    return facade
|
||
|
||
@pytest.mark.parametrize(
    "cache_content, expected_result, expected_hit", [(None, None, False)]  # Test for non-existent file
)
def test_read_from_cache(audit_facade, fake_tool_config, cache_content, expected_result, expected_hit, mocker):
    """
    Exercise AuditFacade.read_from_cache across cache-file states.

    The current parametrization covers only the missing-file case; the seeding
    and mocking branches below are extension points for future cases.
    """
    cache_file = audit_facade.get_cache_filename(fake_tool_config)

    # Seed the cache file when the parametrization supplies content.
    if cache_content is not None:
        with open(cache_file, "w", encoding="utf-8") as handle:
            if isinstance(cache_content, dict):
                json.dump(cache_content, handle, default=custom_json_serializer)  # valid JSON
            else:
                handle.write(cache_content)  # deliberately invalid JSON

    if expected_hit:
        # Shape a ToolCheckResult mock to look like a deserialized cache entry.
        mock_result = MagicMock(spec=ToolCheckResult)
        mock_result.is_problem.return_value = False
        mock_result.__dict__.update(cache_content)

        mocker.patch("cli_tool_audit.models.ToolCheckResult", return_value=mock_result)

    # Act
    result = audit_facade.read_from_cache(fake_tool_config)

    # Assert: either a deserialized result with a cache hit, or a clean miss.
    if expected_result:
        assert result is not None
        assert isinstance(result, ToolCheckResult)
        assert audit_facade.cache_hit is True
    else:
        assert result is None
        assert audit_facade.cache_hit is False
|
||
|
||
# @pytest.fixture | ||
# def fake_tool_config(): | ||
# # Create a mock CliToolConfig instance | ||
# config = MagicMock(spec=CliToolConfig) | ||
# config.name = "test_tool" | ||
# config.cache_hash.return_value = "dummy_hash" | ||
# return config | ||
# | ||
# | ||
# @pytest.fixture | ||
# def audit_facade(tmp_path): | ||
# return AuditFacade(cache_dir=tmp_path) | ||
|
||
|
||
def test_read_from_cache_file_not_found(audit_facade, fake_tool_config):
    """A missing cache file yields None and leaves cache_hit False."""
    # Force the existence check to report the file as absent.
    with patch("pathlib.Path.exists", return_value=False):
        outcome = audit_facade.read_from_cache(fake_tool_config)

    assert outcome is None
    assert audit_facade.cache_hit is False
|
||
|
||
# @pytest.fixture | ||
# def fake_tool_config(): | ||
# config = MagicMock(spec=CliToolConfig) | ||
# config.name = "test_tool" | ||
# config.cache_hash.return_value = "dummy_hash" | ||
# return config | ||
# | ||
# | ||
# @pytest.fixture | ||
# def audit_facade(tmp_path): | ||
# return AuditFacade(cache_dir=tmp_path) | ||
|
||
|
||
def test_happy_path_write_to_cache(audit_facade, fake_tool_config):
    """write_to_cache persists the result's __dict__ as JSON in the cache file."""
    # A result whose serializable state is a single status field.
    fake_result = MagicMock(spec=ToolCheckResult)
    fake_result.__dict__ = {"status": "success"}

    # Act
    audit_facade.write_to_cache(fake_tool_config, fake_result)

    # The cache file must now exist...
    cache_path = audit_facade.get_cache_filename(fake_tool_config)
    assert cache_path.exists()

    # ...and round-trip back to the written status.
    with open(cache_path, encoding="utf-8") as handle:
        persisted = json.load(handle)
    assert persisted["status"] == "success"
|
||
|
||
def test_edge_case_tool_name_with_special_character(audit_facade):
    """A tool name containing special characters still produces a writable cache file."""
    # Config whose name would be awkward in a filesystem path.
    special_config = MagicMock(spec=CliToolConfig)
    special_config.name = "tool@#"
    special_config.cache_hash.return_value = "special_hash"

    # Resolve the target path, then write a minimal result to it.
    cache_path = audit_facade.get_cache_filename(special_config)
    fake_result = MagicMock(spec=ToolCheckResult)
    fake_result.__dict__ = {"status": "success"}
    audit_facade.write_to_cache(special_config, fake_result)

    assert cache_path.exists()  # the file was created despite the odd name
|
||
|
||
def test_edge_case_multiple_cache_writes(audit_facade, fake_tool_config):
    """Repeated writes for the same tool overwrite the same cache file."""
    # Write three successive results for one tool config.
    for index in range(3):
        fake_result = MagicMock(spec=ToolCheckResult)
        fake_result.__dict__ = {"status": f"success-{index}"}
        audit_facade.write_to_cache(fake_tool_config, fake_result)

    # Only the final write should survive.
    with open(audit_facade.get_cache_filename(fake_tool_config), encoding="utf-8") as handle:
        persisted = json.load(handle)
    assert persisted["status"] == "success-2"
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,8 @@ | ||
import pytest | ||
|
||
from cli_tool_audit.audit_manager import AuditManager | ||
|
||
|
||
@pytest.fixture
def audit_manager():
    """Provide a fresh AuditManager instance for each test."""
    manager = AuditManager()
    return manager
Oops, something went wrong.