From 74b6c0dd68908b8b20241b7021690a306856027c Mon Sep 17 00:00:00 2001 From: Martin Vrachev Date: Tue, 22 Jun 2021 14:34:25 +0300 Subject: [PATCH] Test new API: serialization tests for invalid arg A while ago we decided that it's best to research each of the individual attributes one by one and identify what level of validation it needs compared to how we use it: https://github.com/theupdateframework/tuf/pull/1366#issuecomment-829288790. This work is ongoing and there are a couple of commits already merged for this: - https://github.com/theupdateframework/tuf/commit/6c5d970799ffe1f79d02e24a8ea56afeae4e636b - https://github.com/theupdateframework/tuf/commit/f20664d2fc967852d8e3aca7f951e4d1e7855e61 - https://github.com/theupdateframework/tuf/commit/41afb1e1342d5f8870180ba1ecea8894c03aec24 We want to be able to test the attributes' validation against known bad values. The way we want to do that is with the table testing we have added using decorators for our metadata classes defined in the new API: https://github.com/theupdateframework/tuf/pull/1416. This gives us an easy way to add new cases for each of the attributes and not depend on external files. 
Signed-off-by: Martin Vrachev --- tests/test_metadata_serialization.py | 196 ++++++++++++++++++++++++++- 1 file changed, 195 insertions(+), 1 deletion(-) diff --git a/tests/test_metadata_serialization.py b/tests/test_metadata_serialization.py index 920899599b..4749cd231f 100644 --- a/tests/test_metadata_serialization.py +++ b/tests/test_metadata_serialization.py @@ -12,11 +12,13 @@ import unittest import copy -from typing import Dict, Callable +from typing import Dict, Callable, Optional, Mapping, Any +from datetime import datetime from tests import utils from tuf.api.metadata import ( + Signed, Root, Snapshot, Timestamp, @@ -197,6 +199,198 @@ def test_targets_serialization(self, test_case_data): self.assertDictEqual(case_dict, targets.to_dict()) +class TestSigned(Signed): + """Used for testing the abstract "Signed" class.""" + + _signed_type = "signed" + + def __init__( + self, + version: int, + spec_version: str, + expires: datetime, + unrecognized_fields: Optional[Mapping[str, Any]] + ) -> None: + super().__init__( + version, spec_version, expires, unrecognized_fields + ) + + @classmethod + def from_dict(cls, signed_dict: Dict[str, Any]) -> "Signed": + common_args = super()._common_fields_from_dict(signed_dict) + # All fields left in the signed_dict are unrecognized. 
+ return cls(*common_args, signed_dict) + + def to_dict(self) -> Dict[str, Any]: + """Returns the dict representation of self.""" + return super()._common_fields_to_dict() + + +class TestInvalidSerialization(unittest.TestCase): + + invalid_type: DataSet = { + "no _type": '{"spec_version": "1.0.0", "expires": "2030-01-01T00:00:00Z"}', + "empty str _type": '{"_type": "", "spec_version": "1.0.0", "version": 1, "expires": "2030-01-01T00:00:00Z"}', + "_type wrong type": '{"_type": "foo", "spec_version": "1.0.0", "version": 1, "expires": "2030-01-01T00:00:00Z"}' + } + + @run_sub_tests_with_dataset(invalid_type) + def test_invalid_type(self, test_case_data: Dict[str, str]): + case_dict = json.loads(test_case_data) + with self.assertRaises((KeyError, ValueError)): + TestSigned.from_dict(copy.copy(case_dict)) + + invalid_spec_version: DataSet = { + "no spec_version": '{"_type": "signed", "version": 1, "expires": "2030-01-01T00:00:00Z"}', + "empty str spec_version": '{"_type": "signed", "spec_version": "", "version": 1, "expires": "2030-01-01T00:00:00Z"}', + "invalid spec_version str": '{"_type": "signed", "spec_version": "abc", "version": 1, "expires": "2030-01-01T00:00:00Z"}', + "one digit spec_version": '{"_type": "signed", "spec_version": "1", "version": 1, "expires": "2030-01-01T00:00:00Z"}', + "two digit spec_version": '{"_type": "signed", "spec_version": "1.2", "version": 1, "expires": "2030-01-01T00:00:00Z"}', + "no digit spec_version": '{"_type": "signed", "spec_version": "a.b.c", "version": 1, "expires": "2030-01-01T00:00:00Z"}', + "different major spec_version": '{"_type": "signed", "spec_version": "0.0.0", "version": 1, "expires": "2030-01-01T00:00:00Z"}', + } + + @run_sub_tests_with_dataset(invalid_spec_version) + def test_invalid_spec_version(self, test_case_data: Dict[str, str]): + case_dict = json.loads(test_case_data) + with self.assertRaises((KeyError, ValueError)): + TestSigned.from_dict(copy.copy(case_dict)) + + invalid_version: DataSet = { + "no 
version": '{"_type": "signed", "spec_version": "1.0.0", "expires": "2030-01-01T00:00:00Z"}', + "version wrong type": '{"_type": "signed", "spec_version": "1.0.0", "version": "a", "expires": "2030-01-01T00:00:00Z"}', + "version 0": '{"_type": "signed", "spec_version": "1.0.0", "version": 0, "expires": "2030-01-01T00:00:00Z"}', + "version below 0": '{"_type": "signed", "spec_version": "1.0.0", "version": -1, "expires": "2030-01-01T00:00:00Z"}' + } + + @run_sub_tests_with_dataset(invalid_version) + def test_invalid_version(self, test_case_data: Dict[str, str]): + case_dict = json.loads(test_case_data) + with self.assertRaises((KeyError, TypeError, ValueError)): + TestSigned.from_dict(copy.copy(case_dict)) + + invalid_expires: DataSet = { + "no expires": '{"_type": "signed", "spec_version": "1.0.0", "version": 1}', + "wrong datetime string": '{"_type": "signed", "spec_version": "1.0.0", "version": 1, "expires": "abc"}' + } + + @run_sub_tests_with_dataset(invalid_expires) + def test_invalid_expires(self, test_case_data: Dict[str, str]): + case_dict = json.loads(test_case_data) + with self.assertRaises((KeyError, ValueError)): + TestSigned.from_dict(copy.copy(case_dict)) + + invalid_keyid: DataSet = { + "no keyid": '{"keytype": "rsa", "scheme": "rsassa-pss-sha256", "keyval": {"public": "abc"}}', + "keyid wrong type": '{"keyid": 1, "keytype": "rsa", "scheme": "rsassa-pss-sha256", "keyval": {"public": "abc"}}', + } + + @run_sub_tests_with_dataset(invalid_keyid) + def test_invalid_keyid(self, test_case_data: Dict[str, str]): + case_dict = json.loads(test_case_data) + with self.assertRaises((TypeError, KeyError)): + keyid = case_dict.pop("keyid") + Key.from_dict(keyid, copy.copy(case_dict)) + + invalid_keytype: DataSet = { + "no keytype": '{"keyid": "id", "scheme": "rsassa-pss-sha256", "keyval": {"public": "foo"}}', + "keytype wrong type": '{"keyid": "id", "keytype": 1, "scheme": "rsassa-pss-sha256", "keyval": {"public": "abc"}}', + } + + 
@run_sub_tests_with_dataset(invalid_keytype) + def test_invalid_keytype(self, test_case_data: Dict[str, str]): + case_dict = json.loads(test_case_data) + with self.assertRaises((TypeError, KeyError)): + keyid = case_dict.pop("keyid") + Key.from_dict(keyid, copy.copy(case_dict)) + + invalid_scheme: DataSet = { + "no scheme": '{"keyid": "id", "keytype": "rsa", "keyval": {"public": "foo"}}', + "scheme wrong type": '{"keyid": "id", "keytype": "rsa", "scheme": 1, "keyval": {"public": "abc"}}', + } + + @run_sub_tests_with_dataset(invalid_scheme) + def test_invalid_scheme(self, test_case_data: Dict[str, str]): + case_dict = json.loads(test_case_data) + with self.assertRaises((TypeError, KeyError)): + keyid = case_dict.pop("keyid") + Key.from_dict(keyid, copy.copy(case_dict)) + + invalid_keyval: DataSet = { + "no keyval": '{"keyid": "id", "keytype": "rsa", "scheme": "rsassa-pss-sha256"}', + "no public in keyval": '{"keyid": "id", "keytype": "rsa", "scheme": "rsassa-pss-sha256", "keyval": {}}', + "keyval public wrong type": '{"keyid": "id", "keytype": "rsa", "scheme": "rsassa-pss-sha256", "keyval": {"public": 1}}', + "keyval wrong type": '{"keyid": "id", "keytype": "rsa", "scheme": "rsassa-pss-sha256", "keyval": 1}', + } + + @run_sub_tests_with_dataset(invalid_keyval) + def test_invalid_keyval(self, test_case_data: Dict[str, str]): + case_dict = json.loads(test_case_data) + with self.assertRaises((TypeError, KeyError)): + keyid = case_dict.pop("keyid") + Key.from_dict(keyid, copy.copy(case_dict)) + + invalid_threshold: DataSet = { + "no threshold": '{"keyids": ["keyid"]}', + "wrong threshold type": '{"keyids": ["keyid"], "threshold": "a"}', + "threshold below 1": '{"keyids": ["keyid"], "threshold": 0}' + } + + @run_sub_tests_with_dataset(invalid_threshold) + def test_invalid_threshold(self, test_case_data: Dict[str, str]): + case_dict = json.loads(test_case_data) + with self.assertRaises((KeyError, TypeError, ValueError)): + Role.from_dict(copy.deepcopy(case_dict)) + + 
+ invalid_keyids: DataSet = { + "no keyids": '{"threshold": 3}', + "duplicate keyids": '{"keyids": ["keyid", "keyid"], "threshold": 3}', + } + + @run_sub_tests_with_dataset(invalid_keyids) + def test_invalid_keyids(self, test_case_data: Dict[str, str]): + case_dict = json.loads(test_case_data) + with self.assertRaises((KeyError, ValueError)): + Role.from_dict(copy.deepcopy(case_dict)) + + invalid_length: DataSet = { + "wrong length type": '{"version": 1, "length": "a", "hashes": {"sha256" : "abc"}}', + "length 0": '{"version": 1, "length": 0, "hashes": {"sha256" : "abc"}}', + "length below 0": '{"version": 1, "length": -1, "hashes": {"sha256" : "abc"}}', + } + + @run_sub_tests_with_dataset(invalid_length) + def test_invalid_length(self, test_case_data: Dict[str, str]): + case_dict = json.loads(test_case_data) + with self.assertRaises((TypeError, ValueError)): + MetaFile.from_dict(copy.deepcopy(case_dict)) + + invalid_hashes: DataSet = { + "empty hashes dict": '{"version": 1, "length": 1, "hashes": {}}', + "hashes wrong type": '{"version": 1, "length": 1, "hashes": 1}', + # Hashes keys cannot be anything but strings, because of the JSON specification. + # So, do we want to test for that? + "hashes values wrong type": '{"version": 1, "length": 1, "hashes": {"sha256": 1}}', + } + + @run_sub_tests_with_dataset(invalid_hashes) + def test_invalid_hashes(self, test_case_data: Dict[str, str]): + case_dict = json.loads(test_case_data) + with self.assertRaises((ValueError, AttributeError, TypeError)): + MetaFile.from_dict(copy.deepcopy(case_dict)) + + invalid_targetfile_hashes_length: DataSet = { + "no hashes": '{"length": 1}', + "no length": '{"hashes": {"sha256": 1}}' + # The remaining cases are the same as for invalid_hashes and + # invalid_length datasets. 
+ } + @run_sub_tests_with_dataset(invalid_targetfile_hashes_length) + def test_invalid_targetfile_hashes_length(self, test_case_data: Dict[str, str]): + case_dict = json.loads(test_case_data) + with self.assertRaises(KeyError): + TargetFile.from_dict(copy.deepcopy(case_dict)) + # Run unit test. if __name__ == '__main__': utils.configure_test_logging(sys.argv)