From 8e2584a92bbcb03f4662badcda310cc1da0cb155 Mon Sep 17 00:00:00 2001 From: "Harsh Shah (C)" Date: Tue, 29 Dec 2020 12:45:21 +0530 Subject: [PATCH] feat: Adds mechanism to generate tests to validate savedsearches by parsing savedsearches.conf. --- .github/workflows/cla.yaml | 2 +- .github/workflows/release-notes.yaml | 2 +- build.py | 46 ---------------- docs/api_reference/addon_parser.rst | 6 +++ docs/cim_tests.rst | 8 +-- docs/field_tests.rst | 17 +++++- docs/how_to_use.rst | 4 +- docs/release_history.rst | 14 ++++- pytest_splunk_addon/plugin.py | 3 ++ .../standard_lib/addon_parser/__init__.py | 17 ++++++ .../addon_parser/savedsearches_parser.py | 52 ++++++++++++++++++ .../fields_tests/test_generator.py | 15 ++++++ .../fields_tests/test_templates.py | 50 +++++++++++++++++ .../TA_broken/default/savedsearches.conf | 49 +++++++++++++++++ .../TA_fiction/default/savedsearches.conf | 54 +++++++++++++++++++ tests/constants.py | 15 ++++++ 16 files changed, 298 insertions(+), 56 deletions(-) delete mode 100644 build.py create mode 100644 pytest_splunk_addon/standard_lib/addon_parser/savedsearches_parser.py create mode 100644 tests/addons/TA_broken/default/savedsearches.conf create mode 100644 tests/addons/TA_fiction/default/savedsearches.conf diff --git a/.github/workflows/cla.yaml b/.github/workflows/cla.yaml index a9e046d30..ea6c5b974 100644 --- a/.github/workflows/cla.yaml +++ b/.github/workflows/cla.yaml @@ -21,7 +21,7 @@ jobs: path-to-signatures: ".github/signatures/version1/cla.json" path-to-document: "https://github.com/splunk/addonfactory-test-releaseci/blob/main/CLA.md" # e.g. 
a CLA or a DCO document # branch should not be protected - branch: "master" + branch: "main" allowlist: dependabot #below are the optional inputs - If the optional inputs are not given, then default values will be taken #remote-organization-name: enter the remote organization name where the signatures should be stored (Default is storing the signatures in the same repository) diff --git a/.github/workflows/release-notes.yaml b/.github/workflows/release-notes.yaml index 4fa712426..2a0bd917a 100644 --- a/.github/workflows/release-notes.yaml +++ b/.github/workflows/release-notes.yaml @@ -15,7 +15,7 @@ jobs: git fetch --prune --unshallow --tags - uses: snyk/release-notes-preview@v1.6.1 with: - releaseBranch: master + releaseBranch: main env: GITHUB_PR_USERNAME: ${{ github.actor }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/build.py b/build.py deleted file mode 100644 index 4ea60af74..000000000 --- a/build.py +++ /dev/null @@ -1,46 +0,0 @@ -# SPDX-FileCopyrightText: 2020 2020 -# -# SPDX-License-Identifier: Apache-2.0 - -from splunk_add_on_ucc_framework import logger, sourcedir, recursive_overwrite - -import os -import shutil - -def setup_env(): - logger.info("Setting up Environment") - install_npm_dependencies = "npm install -g bower" - os.system(install_npm_dependencies) - os.chdir(os.path.join(sourcedir, "UCC-UI-lib", "bower_components", "SplunkWebCore")) - os.system("npm install") - - - -def generate_static_files(): - logger.info("Generating Static files") - os.chdir(os.path.join(sourcedir, "UCC-UI-lib")) - os.system("npm install") - os.system("bower install") - os.system("npm run build") - src = os.path.join(sourcedir, "UCC-UI-lib", "package", "appserver", "templates", "redirect.html") - dest = os.path.join(sourcedir, "UCC-UI-lib", "build", "appserver", "templates", "redirect.html") - shutil.copy(src, dest) - src = os.path.join(sourcedir, "UCC-UI-lib", "data", "redirect_page.js") - dest = os.path.join(sourcedir, "UCC-UI-lib", "build", "appserver", 
"static", "js", "build", "redirect_page.js") - shutil.copy(src, dest) - - -def migrate_package(): - logger.info("Exporting generated Package.") - src = os.path.join(os.path.join(sourcedir, "UCC-UI-lib", "build")) - dest = os.path.join(os.path.join(sourcedir, "package")) - if os.path.exists(dest): - shutil.rmtree(dest, ignore_errors=True) - os.makedirs(dest) - recursive_overwrite(src, dest) - - -def build_ucc(): - setup_env() - generate_static_files() - migrate_package() \ No newline at end of file diff --git a/docs/api_reference/addon_parser.rst b/docs/api_reference/addon_parser.rst index f713bb31c..44679e882 100644 --- a/docs/api_reference/addon_parser.rst +++ b/docs/api_reference/addon_parser.rst @@ -34,3 +34,9 @@ TransformsParser .. automodule:: standard_lib.addon_parser.transforms_parser :members: :show-inheritance: + +SavedsearchesParser +~~~~~~~~~~~~~~~~~~~ +.. automodule:: standard_lib.addon_parser.savedsearches_parser + :members: + :show-inheritance: diff --git a/docs/cim_tests.rst b/docs/cim_tests.rst index 4925c0f78..913129db6 100644 --- a/docs/cim_tests.rst +++ b/docs/cim_tests.rst @@ -7,7 +7,7 @@ Overview The CIM tests are written with a purpose of testing the compatibility of the add-on with CIM Data Models (Based on Splunk_SA_CIM 4.15.0). An add-on is said to be CIM compatible if it fulfils the following two criteria: -1. The add-on extracts all the fields with valid values, which are marked as required by the `Data Model Definitions `_. +1. The add-on extracts all the fields with valid values, which are marked as required by the `Data Model Definitions `_. 2. Any event for the add-on is not mapped with more than one data model. --------------------- @@ -34,7 +34,7 @@ Test Scenarios **Workflow:** * Plugin parses tags.conf to get a list of tags for each eventtype. - * Plugin parses all the `supported datamodels `_. + * Plugin parses all the `supported datamodels `_. * Then it gets a list of the datasets mapped with an eventtype. 
* Generates test case for each eventtype. @@ -80,11 +80,11 @@ Test Scenarios **Workflow:** - * Plugin collects the list of not_allowed_in_search fields from mapped datasets and `CommonFields.json `_. + * Plugin collects the list of not_allowed_in_search fields from mapped datasets and `CommonFields.json `_. * Using search query the test case verifies if not_allowed_in_search fields are populated in search or not. .. note:: - `CommonFields.json `_ contains fields which are automatically provided by asset and identity correlation features of applications like Splunk Enterprise Security. + `CommonFields.json `_ contains fields which are automatically provided by asset and identity correlation features of applications like Splunk Enterprise Security. **4. Testcase for all not_allowed_in_props fields** diff --git a/docs/field_tests.rst b/docs/field_tests.rst index 0d0699ac0..d6c638f70 100644 --- a/docs/field_tests.rst +++ b/docs/field_tests.rst @@ -15,6 +15,7 @@ Overview 5. Eval 6. Eventtypes 7. Tags + 8. Savedsearches -------------------------------- @@ -121,7 +122,21 @@ Test Scenarios **Workflow:** * In tags.conf for each tag defined in the stanza, the plugin generates a test case. - * For each tag, the plugin generates a search query including the stanza and the tag and asserts event_count > 0 + * For each tag, the plugin generates a search query including the stanza and the tag and asserts event_count > 0. + +**7. Search query should be present in each savedsearches.** + + .. code-block:: python + + test_savedsearches[] + + Test case verifies that the search mentioned in savedsearches.conf generates valid search results. + Here is a stanza mentioned in savedsearches.conf file. + + **Workflow:** + + * In savedsearches.conf for each stanza, the plugin generates a test case. + * For each stanza mentioned in savedsearches.conf plugin generates an SPL search query and asserts event_count > 0 for the savedsearch. 
Testcase Troubleshooting ------------------------ diff --git a/docs/how_to_use.rst b/docs/how_to_use.rst index ea9c15812..f5dbeb4c7 100644 --- a/docs/how_to_use.rst +++ b/docs/how_to_use.rst @@ -279,9 +279,9 @@ Extending pytest-splunk-addon How can this be achieved : - - Make json representation of the data models, which satisfies this `DataModelSchema `_. + - Make json representation of the data models, which satisfies this `DataModelSchema `_. - Provide the path to the directory having all the data models by adding ``--splunk_dm_path path_to_dir`` to the pytest command - - The test cases will now be generated for the data models provided to the plugin and not for the `default data models `_. + - The test cases will now be generated for the data models provided to the plugin and not for the `default data models `_. .. raw:: html diff --git a/docs/release_history.rst b/docs/release_history.rst index 55b862184..c556a5abb 100644 --- a/docs/release_history.rst +++ b/docs/release_history.rst @@ -8,7 +8,19 @@ Release History The best way to track the development of pytest-splunk-addon is through `the GitHub Repo `_. -1.3.14 +1.4.0 +"""""""""""""""""""""""""" + **Changes:** + + * Plugin now generates and executes tests to validate savedsearches defined in savedsearches.conf. + + **Known Issues:** + + * Event ingestion through SC4S via UDP port + * Fields for modular regular expressions are not extracted in the plugin. 
+ + +1.3.15 """""""""""""""""""""""""" **Changes:** diff --git a/pytest_splunk_addon/plugin.py b/pytest_splunk_addon/plugin.py index 294e289d3..fcd32860f 100644 --- a/pytest_splunk_addon/plugin.py +++ b/pytest_splunk_addon/plugin.py @@ -37,6 +37,9 @@ def pytest_configure(config): "markers", "splunk_searchtime_fields_eventtypes: Test search time eventtypes only", ) + config.addinivalue_line( + "markers", "splunk_searchtime_fields_savedsearches: Test search time savedsearches only" + ) config.addinivalue_line( "markers", "splunk_searchtime_cim: Test CIM compatibility only" ) diff --git a/pytest_splunk_addon/standard_lib/addon_parser/__init__.py b/pytest_splunk_addon/standard_lib/addon_parser/__init__.py index 0e24a74f3..5bb46611a 100644 --- a/pytest_splunk_addon/standard_lib/addon_parser/__init__.py +++ b/pytest_splunk_addon/standard_lib/addon_parser/__init__.py @@ -18,6 +18,7 @@ from .props_parser import PropsParser from .tags_parser import TagsParser from .eventtype_parser import EventTypeParser +from .savedsearches_parser import SavedSearchParser LOGGER = logging.getLogger("pytest-splunk-addon") @@ -37,6 +38,7 @@ def __init__(self, splunk_app_path): self._props_parser = None self._tags_parser = None self._eventtype_parser = None + self._savedsearch_parser = None @property def app(self): @@ -62,6 +64,12 @@ def eventtype_parser(self): self._eventtype_parser = EventTypeParser(self.splunk_app_path, self.app) return self._eventtype_parser + @property + def savedsearch_parser(self): + if not self._savedsearch_parser: + self._savedsearch_parser = SavedSearchParser(self.splunk_app_path,self.app) + return self._savedsearch_parser + def get_props_fields(self): """ Parse the props.conf and yield all supported fields @@ -88,3 +96,12 @@ def get_eventtypes(self): generator of list of eventtypes """ return self.eventtype_parser.get_eventtypes() + + def get_savedsearches(self): + """ + Parse the App configuration files & yield savedsearches + + Yields: + generator of list of 
savedsearches + """ + return self.savedsearch_parser.get_savedsearches() diff --git a/pytest_splunk_addon/standard_lib/addon_parser/savedsearches_parser.py b/pytest_splunk_addon/standard_lib/addon_parser/savedsearches_parser.py new file mode 100644 index 000000000..486d1f07e --- /dev/null +++ b/pytest_splunk_addon/standard_lib/addon_parser/savedsearches_parser.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +""" +Provides savedsearches.conf parsing mechanism +""" + +class SavedSearchParser(object): + """ + Parses savedsearches.conf and extracts savedsearches + + Args: + splunk_app_path (str): Path of the Splunk app + app (splunk_appinspect.App): Object of Splunk app + """ + def __init__(self, splunk_app_path, app): + self.app = app + self.splunk_app_path = splunk_app_path + self._savedsearches = None + + @property + def savedsearches(self): + try: + if not self._savedsearches: + self._savedsearches = self.app.get_config("savedsearches.conf") + return self._savedsearches + except OSError: + return None + + def get_savedsearches(self): + """ + Parse the App configuration files & yield savedsearches + + Yields: + generator of list of savedsearches + """ + if not self.savedsearches: + return None + for stanza in self.savedsearches.sects: + savedsearch_sections = self.savedsearches.sects[stanza] + savedsearch_container = { + "stanza" : stanza, + "search" : "index = \"main\"", + "dispatch.earliest_time" : "0", + "dispatch.latest_time" : "now"} + for key in savedsearch_sections.options: + empty_value = ['None','',' '] + if key == "search" and savedsearch_sections.options[key].value not in empty_value: + savedsearch_container[key] = savedsearch_sections.options[key].value + elif key == "dispatch.earliest_time" and savedsearch_sections.options[key].value not in empty_value: + savedsearch_container[key] = savedsearch_sections.options[key].value + elif key == "dispatch.latest_time" and savedsearch_sections.options[key].value not in empty_value: + savedsearch_container[key] = 
savedsearch_sections.options[key].value + yield savedsearch_container diff --git a/pytest_splunk_addon/standard_lib/fields_tests/test_generator.py b/pytest_splunk_addon/standard_lib/fields_tests/test_generator.py index 34791c3cf..a621867f4 100644 --- a/pytest_splunk_addon/standard_lib/fields_tests/test_generator.py +++ b/pytest_splunk_addon/standard_lib/fields_tests/test_generator.py @@ -41,6 +41,7 @@ def generate_tests(self, fixture): * splunk_app_searchtime_negative * splunk_app_searchtime_eventtypes * splunk_app_searchtime_tags + * splunk_app_searchtime_savedsearches Args: fixture(str): fixture name @@ -54,6 +55,8 @@ def generate_tests(self, fixture): yield from self.generate_tag_tests() elif fixture.endswith("eventtypes") : yield from self.generate_eventtype_tests() + elif fixture.endswith("savedsearches"): + yield from self.generate_savedsearches_tests() def generate_field_tests(self, is_positive): """ @@ -143,6 +146,18 @@ def generate_eventtype_tests(self): id="eventtype::{stanza}".format(**each_eventtype) ) + def generate_savedsearches_tests(self): + """ + Generate test case for savedsearches + + Yields: + pytest.params for the test templates + """ + for each_savedsearch in self.addon_parser.get_savedsearches(): + yield pytest.param( + each_savedsearch, + id="{stanza}".format(**each_savedsearch)) + def _contains_classname(self, fields_group, criteria): """ Check if the field_group dictionary contains the classname diff --git a/pytest_splunk_addon/standard_lib/fields_tests/test_templates.py b/pytest_splunk_addon/standard_lib/fields_tests/test_templates.py index d7caac325..482441a3a 100644 --- a/pytest_splunk_addon/standard_lib/fields_tests/test_templates.py +++ b/pytest_splunk_addon/standard_lib/fields_tests/test_templates.py @@ -264,3 +264,53 @@ def test_eventtype( f"No result found for the search.\nsearch={search}\n" f"interval={splunk_search_util.search_interval}, retries={splunk_search_util.search_retry}" ) + + + @pytest.mark.splunk_searchtime_fields + 
@pytest.mark.splunk_searchtime_fields_savedsearches + def test_savedsearches( + self, + splunk_search_util, + splunk_ingest_data, + splunk_setup, + splunk_searchtime_fields_savedsearches, + record_property, + caplog, + ): + """ + Tests if all savedsearches in savedsearches.conf are being executed properly to generate proper results. + + Args: + splunk_search_util (fixture): + Fixture to create a simple connection to Splunk via SplunkSDK + splunk_searchtime_fields_savedsearches (fixture): + Fixture containing list of savedsearches + record_property (fixture): + Used to add user properties to test report + caplog (fixture): + Access and control log capturing + + Returns: + Asserts whether test case passes or fails. + """ + search_query = splunk_searchtime_fields_savedsearches["search"] + earliest_time = splunk_searchtime_fields_savedsearches["dispatch.earliest_time"] + latest_time = splunk_searchtime_fields_savedsearches["dispatch.latest_time"] + + temp_search_query = search_query.split('|') + temp_search_query[0] += " earliest_time = {0} latest_time = {1} ".format(earliest_time,latest_time) + search_query = "|".join(temp_search_query) + + search = (f"search {search_query}") + + self.logger.info(f"Search: {search}") + + result = splunk_search_util.checkQueryCountIsGreaterThanZero( + search, interval=splunk_search_util.search_interval, retries=splunk_search_util.search_retry + ) + + record_property("search", search) + assert result, ( + f"No result found for the search.\nsearch={search}\n" + f"interval={splunk_search_util.search_interval}, retries={splunk_search_util.search_retry}" + ) diff --git a/tests/addons/TA_broken/default/savedsearches.conf b/tests/addons/TA_broken/default/savedsearches.conf new file mode 100644 index 000000000..acf8ed697 --- /dev/null +++ b/tests/addons/TA_broken/default/savedsearches.conf @@ -0,0 +1,49 @@ +######## All the scenarios mentioned here are expected to FAIL ######## +# Copyright Splunk + +[negative_search] +# Component tested: 
index = _internal sourcetype = splunk_d earliest_time = 0 latest_time = now +# Scenario: savedsearch with negative search query so the plugin tests if the savedsearch fails for the negative search query. +# Expected result : FAIL +search = index = _internal sourcetype = splunk_d + +[search_invalid_earliest_latest] +# Component tested: index = _internal sourcetype = splunkd earliest_time = now latest_time = -1d +# Scenario: savedsearch with search query and invalid values of earliest_time and latest_time so the plugin tests +## if the savedsearch fails for invalid values of earliest_time and latest_time. +# Expected result : FAIL +search = index = _internal sourcetype = splunkd +dispatch.earliest_time = now +dispatch.latest_time = -1d + +[search_invalid_earliest] +# Component tested: index = _internal sourcetype = splunkd earliest_time = now latest_time = now +# Scenario: savedsearch with search query and invalid value of earliest_time so the plugin tests +## if the savedsearch fails for the invalid value of earliest_time. +# Expected result : FAIL +search = index = _internal sourcetype = splunkd +dispatch.earliest_time = now + +[search_invalid_latest] +# Component tested: index = _internal sourcetype = splunkd earliest_time = 0 latest_time = 0 +# Scenario: savedsearch with search query and invalid value of latest_time so the plugin tests +## if the savedsearch fails for the invalid value of latest_time. +# Expected result : FAIL +search = index = _internal sourcetype = splunkd +dispatch.latest_time = 0 + +[no_search] +# Component tested: index = "main" earliest_time = -3s latest_time = -1s +# Scenario: savedsearch without search query so the plugin tests if the savedsearch fails +## for default values of search = index="main". 
+# Expected result : FAIL +dispatch.earliest_time = -3s +dispatch.latest_time = -1s + +[empty_search_earliest_time] +# Component tested: index = "main" earliest_time = -5s latest_time = now +# Scenario: savedsearch with empty search query and without latest_time so the plugin tests if +## the savedsearch fails for search = index="main" and earliest_time = -5s. +# Expected result : FAIL +search = +dispatch.earliest_time = -5s diff --git a/tests/addons/TA_fiction/default/savedsearches.conf b/tests/addons/TA_fiction/default/savedsearches.conf new file mode 100644 index 000000000..dddaff315 --- /dev/null +++ b/tests/addons/TA_fiction/default/savedsearches.conf @@ -0,0 +1,54 @@ +######## All the scenarios mentioned here are expected to PASS ######## +# Copyright Splunk + +[search_earliest_time_latest_time] +# Component tested: index = _internal earliest_time = -7d latest_time = now | table host,source, sourcetype +# Scenario: savedsearch with search query, earliest_time and latest_time so the plugin tests if the savedsearch works. +# Expected result : PASS +search = index = _internal | table host,source, sourcetype +dispatch.earliest_time = -7d +dispatch.latest_time = now + +[basic_search] +# Component tested: index = _internal earliest_time = 0 latest_time = now | stats count by sourcetype +# Scenario: savedsearch with search query so the plugin tests if the savedsearch works for default values of +## earliest_time = 0 and latest_time = now. +# Expected result : PASS +search = index = _internal | stats count by sourcetype + +[search_earliest_time] +# Component tested: index = _internal earliest_time = -4d latest_time = now | stats count by sourcetype | outputlookup saved_search_data.csv +# Scenario: savedsearch with search query and earliest_time so the plugin tests if the savedsearch works for +## default value of latest_time = now. 
+# Expected result : PASS +search = index = _internal | stats count by sourcetype | outputlookup saved_search_data.csv +dispatch.earliest_time = -4d + +[search_latest_time] +# Component tested: index = _internal earliest_time = 0 latest_time = -1s +# Scenario: savedsearch with search query and latest_time so the plugin tests if the savedsearch works for +## default value of earliest_time = 0. +# Expected result : PASS +search = index = _internal +dispatch.latest_time = -1s + +[empty_search] +# Component tested: index = "main" earliest_time = 0 latest_time = now +# Scenario: savedsearch with empty search query, without earliest_time and latest_time so the plugin tests if +## the savedsearch works for default values of search = index="main", earliest_time = 0 and latest_time = now. +# Expected result : PASS +search = + +[empty_search_latest_time] +# Component tested: index = "main" earliest_time = 0 latest_time = -1s +# Scenario: savedsearch with empty search query and without earliest_time so the plugin tests if +## the savedsearch works for default values of search = index="main" and earliest_time = 0. +# Expected result : PASS +search = +dispatch.latest_time = -1s + +[no_search_no_time] +# Component tested: index = "main" earliest_time = 0 latest_time = now +# Scenario: savedsearch without search query, earliest_time and latest_time so the plugin tests if the savedsearch works +## for default values of search = index="main", earliest_time = 0 and latest_time = now. 
+# Expected result : PASS diff --git a/tests/constants.py b/tests/constants.py index 5183ac8eb..336ba5e92 100644 --- a/tests/constants.py +++ b/tests/constants.py @@ -228,6 +228,13 @@ '*test_splunk_app_fiction.py::Test_App::test_eventtype*eventtype::fiction_is_splunkd* PASSED*', '*test_splunk_app_fiction.py::Test_App::test_eventtype*eventtype::fiction_for_tags_positive* PASSED*', '*test_splunk_app_fiction.py::Test_App::test_eventtype*eventtype::fiction_is_splunkd-%host%* PASSED*', + '*test_splunk_app_fiction.py::Test_App::test_savedsearches*search_earliest_time_latest_time* PASSED*', + '*test_splunk_app_fiction.py::Test_App::test_savedsearches*basic_search* PASSED*', + '*test_splunk_app_fiction.py::Test_App::test_savedsearches*search_earliest_time* PASSED*', + '*test_splunk_app_fiction.py::Test_App::test_savedsearches*search_latest_time* PASSED*', + '*test_splunk_app_fiction.py::Test_App::test_savedsearches*empty_search* PASSED*', + '*test_splunk_app_fiction.py::Test_App::test_savedsearches*empty_search_latest_time* PASSED*', + '*test_splunk_app_fiction.py::Test_App::test_savedsearches*no_search_no_time* PASSED*', ] @@ -358,6 +365,12 @@ '*test_splunk_app_broken.py::Test_App::test_tags*source="/opt/splunk/var/log/splunk/splunkd.log"::tag::tags_negative_testing* FAILED*', '*test_splunk_app_broken.py::Test_App::test_eventtype*eventtype::broken_is_splunkd* FAILED*', '*test_splunk_app_broken.py::Test_App::test_eventtype*eventtype::broken_is_splunkd-%host%* FAILED*', + '*test_splunk_app_broken.py::Test_App::test_savedsearches*search_invalid_earliest_latest* FAILED*', + '*test_splunk_app_broken.py::Test_App::test_savedsearches*search_invalid_earliest* FAILED*', + '*test_splunk_app_broken.py::Test_App::test_savedsearches*negative_search* FAILED*', + '*test_splunk_app_broken.py::Test_App::test_savedsearches*search_invalid_latest* FAILED*', + '*test_splunk_app_broken.py::Test_App::test_savedsearches*no_search* FAILED*', + 
'*test_splunk_app_broken.py::Test_App::test_savedsearches*empty_search_earliest_time* FAILED*', ] """ @@ -610,6 +623,7 @@ '*test_splunk_fiction_indextime.py::Test_App::test_cim_fields_not_allowed_in_search*splunk_searchtime_cim_fields_not_allowed_in_search0* SKIPPED*', '*test_splunk_fiction_indextime.py::Test_App::test_tags*splunk_searchtime_fields_tags0* SKIPPED*', '*test_splunk_fiction_indextime.py::Test_App::test_eventtype*splunk_searchtime_fields_eventtypes0* SKIPPED*', + '*test_splunk_fiction_indextime.py::Test_App::test_savedsearches*splunk_searchtime_fields_savedsearches0* SKIPPED*', ] """ Define the TA_fiction_indextime_broken add-on passed test case list. @@ -698,4 +712,5 @@ '*test_splunk_fiction_indextime_broken.py::Test_App::test_props_fields_no_dash_not_empty*splunk_searchtime_fields_negative0* SKIPPED*', '*test_splunk_fiction_indextime_broken.py::Test_App::test_tags*splunk_searchtime_fields_tags0* SKIPPED*', '*test_splunk_fiction_indextime_broken.py::Test_App::test_eventtype*splunk_searchtime_fields_eventtypes0* SKIPPED*', + '*test_splunk_fiction_indextime_broken.py::Test_App::test_savedsearches*splunk_searchtime_fields_savedsearches0* SKIPPED*', ]