From 9e8bcba48f905a4209df232ad80df46c1c77b6d2 Mon Sep 17 00:00:00 2001
From: "Everton H. Taques" <97463920+etaques@users.noreply.github.com>
Date: Mon, 20 Feb 2023 11:55:32 -0300
Subject: [PATCH] Add changes on production environment (#2232)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* [FIX] orb helm url (#2154)
[FIX] orb helm url (#2154)
* [FIX] chart lock url on kind environment (#2156)
[FIX] chart lock url on kind environment (#2156)
* new status for policy with no tap match (#2155)
* [ORB-203] Enable sink status update by listening to otel-collector logs. (#2135)
[ORB-203] Enable sink status update by listening to otel-collector logs. (#2135)
* [FIX] change organization to orb-community (#2157)
[FIX] change organization to orb-community (#2157)
* feat(maestro): fix message with error in mid-sentence. (#2158)
feat(maestro): fix message with error in mid-sentence. (#2158)
* fix(fleet): fix group tags editing - make it unable to delete tags (#2152)
* fix group tags editing - make it unable to delete tags
* typo(agent): fix no_tap_match status typo
* fix(fleet): partial updates for agent and endpoint unit tests
* fix(fleet): partial updates for agent and service unit tests
* Remove otel collector health_check extension unused (#2159)
Remove otel collector health_check extension unused (#2159)
* Removing logging extension on otel collector (#2160)
Removing logging extension on otel collector (#2160)
* [FIX] DeployCollector function (#2162)
[FIX] DeployCollector function (#2162)
* Update README.md (#2161)
* Update README.md
* [FIX] adding ownerid on decode GRPC sinks response (#2163)
* fix(maestro): sinks status fix on active and errored sinks
* [FIX] on logic for active and error sinks status
* simplify analyzeLogs function
* lastRemoteWrite is actually controlled by sinker
* fix message
* remove comments
* remove unnecessary
* deployment status should be internally controlled
* fix err variable
* add comments
* fix comment
* fix comments
* add error if collector was not found
* fix error
* add changes
* add changes
* fix maestro lastactivity
* fix maestro lastactivity
* fix condition for idle
* remove idle control on sinker
* deployment check
* feat(migrate): add migrate service to default and support openTelemetry in all sinks (#2150)
* feat(migrate): add migrate service, to add/remove openTelemetry, add control flag there to be able to rollback if necessary.
Signed-off-by: Luiz Pegoraro
* feat(sinker): remove skip, and add deprecation message.
Signed-off-by: Luiz Pegoraro
* feat(sinker): fix deprecation message.
Signed-off-by: Luiz Pegoraro
* feat(sinks): add default values for OpenTelemetry sink.
Signed-off-by: Luiz Pegoraro
* feat(ui): remove sinks OpenTelemetry flag.
Signed-off-by: Luiz Pegoraro
* feat(sinks): fix opentelemetry tag case
Signed-off-by: Luiz Pegoraro
* feat(UI): remove OpenTelemetry flag.
Signed-off-by: Luiz Pegoraro
* feat(makefile): add kubectl rollout in makefile.
Signed-off-by: Luiz Pegoraro
* feat(migrate): fix indentation.
Signed-off-by: Luiz Pegoraro
* feat(charts): testing chart migrate true.
Signed-off-by: Luiz Pegoraro
* feat(maestro): add otel flag enabled as true.
Signed-off-by: Luiz Pegoraro
* feat(maestro): re-add after testing.
Signed-off-by: Luiz Pegoraro
---------
Signed-off-by: Luiz Pegoraro
* [FIX] refactoring maestro event sourcing for sink status (#2165)
[FIX] refactoring maestro event sourcing for sink status (#2165)
* Fix delete sinks on maestro (#2167)
Fix delete sinks on maestro (#2167)
* [Feat] Remove otel collector when it is idle (#2168)
* feat(maestro): update cache with creation of yaml. (#2169)
Signed-off-by: Luiz Pegoraro
* fix(maestro): fix failed to parse json (#2170)
* feat(maestro): update cache with creation of yaml.
Signed-off-by: Luiz Pegoraro
* feat(maestro): fix parse of json.
Signed-off-by: Luiz Pegoraro
* feat(maestro): add debug log.
Signed-off-by: Luiz Pegoraro
---------
Signed-off-by: Luiz Pegoraro
* [FIX] maestro event sourcing to update otel collector (#2171)
[FIX] maestro event sourcing to update otel collector (#2171)
* increasing timeout for sink status because of otel (#2172)
* feat(migrate): prevent previous migration from executing in migrate up/down. (#2174)
Signed-off-by: Luiz Pegoraro
* feat(agent): fix context propagation for failure in otel (#2166)
* feat(agent): fix context propagation for failure in otel
Signed-off-by: Luiz Pegoraro
* feat(agent): add context propagation.
Signed-off-by: Luiz Pegoraro
---------
Signed-off-by: Luiz Pegoraro
* fix(sinks): fix merging of the sinks tags and config. (#2176)
* fix(sinks): fix merging of the sinks tags and config.
Signed-off-by: Luiz Pegoraro
* fix(sinks): fix tests
Signed-off-by: Luiz Pegoraro
* fix(sinker): fix not removing sinker cache when removing sink.
Signed-off-by: Luiz Pegoraro
---------
Signed-off-by: Luiz Pegoraro
* [FIX] do not skip when lastActivity not found on redis (#2175)
* do not skip when lastactivity not found
* [FIX] only check last activity after error
* fix variables
* fix variables
---------
Co-authored-by: Luiz Henrique Pegoraro
* fix(maestro): fix state change to remove the local cache, refactored code, to only redis package have redisClient (#2177)
* fix(maestro): replace local cache for wait mechanism in getting the deployment state
Signed-off-by: Luiz Pegoraro
* fix(maestro): add secondary step validation
Signed-off-by: Luiz Pegoraro
* fix(maestro): focused every redis integration into redis eventStream service.
Signed-off-by: Luiz Pegoraro
---------
Signed-off-by: Luiz Pegoraro
* fix(maestro): fix maestro redis proper client. (#2178)
* fix(maestro): fix maestro redis proper client.
Signed-off-by: Luiz Pegoraro
* fix(maestro): reduce time to check for pod creation
Signed-off-by: Luiz Pegoraro
---------
Signed-off-by: Luiz Pegoraro
* fix(maestro): added fine tuning to maestro. (#2179)
Signed-off-by: Luiz Pegoraro
* Fine tuning maestro (#2180)
* fix(maestro): added fine tuning to maestro.
Signed-off-by: Luiz Pegoraro
* fix(maestro): added fine tuning to maestro.
Signed-off-by: Luiz Pegoraro
* fix(maestro): more fine tuning in wait periods for k8s
Signed-off-by: Luiz Pegoraro
* fix(maestro): more fine-tuning in wait periods for k8s.
Signed-off-by: Luiz Pegoraro
* fix(sinks): add remote_host validation to prevent users to create sinks with invalid url
Signed-off-by: Luiz Pegoraro
* fix(sinks): fix tests to adapt to new error
Signed-off-by: Luiz Pegoraro
---------
Signed-off-by: Luiz Pegoraro
* Sink error message (#2182)
* fix(maestro): added fine tuning to maestro.
Signed-off-by: Luiz Pegoraro
* fix(maestro): added fine tuning to maestro.
Signed-off-by: Luiz Pegoraro
* fix(maestro): more fine tuning in wait periods for k8s
Signed-off-by: Luiz Pegoraro
* fix(maestro): more fine-tuning in wait periods for k8s.
Signed-off-by: Luiz Pegoraro
* fix(sinks): add remote_host validation to prevent users to create sinks with invalid url
Signed-off-by: Luiz Pegoraro
* fix(sinks): fix tests to adapt to new error
Signed-off-by: Luiz Pegoraro
* fix(sinks): fix error message.
Signed-off-by: Luiz Pegoraro
* fix(maestro-): extracted change of status.
Signed-off-by: Luiz Pegoraro
---------
Signed-off-by: Luiz Pegoraro
* Sinker ifs state (#2183)
* fix(sinker): fix if nesting
Signed-off-by: Luiz Pegoraro
* fix(sinker): fix if nesting
Signed-off-by: Luiz Pegoraro
---------
Signed-off-by: Luiz Pegoraro
* feat(maestro): fix error messages (#2184)
* fix(sinker): fix if nesting
Signed-off-by: Luiz Pegoraro
* fix(sinker): fix if nesting
Signed-off-by: Luiz Pegoraro
* fix(maestro): warning as error.
Signed-off-by: Luiz Pegoraro
* fix(maestro): fix casing.
Signed-off-by: Luiz Pegoraro
---------
Signed-off-by: Luiz Pegoraro
* changing scenario descriptions for dry-run execution (#2185)
* Update README.md (#2181)
* Dns and Net v2 metrics test (#2173)
* Update README.md
* updating kind readme
* testing metrics of dns v2
* testing metrics of dns v2
* metrics net handler v2
* net config
* fix(maestro): attempt of different approach on idle and active (#2186)
* fix(sinker): fix if nesting
Signed-off-by: Luiz Pegoraro
* fix(maestro): refactored maestro monitor logic, and add logs to have traces on otel-sinker.
Signed-off-by: Luiz Pegoraro
---------
Signed-off-by: Luiz Pegoraro
* inserting tag on test scenarios with error status on sink (#2187)
* fix(maestro): fix maestro reading events. (#2188)
Signed-off-by: Luiz Pegoraro
* fix(maestro): fix maestro reading redis and groups (#2189)
* fix(maestro): fix maestro reading redis and groups
Signed-off-by: Luiz Pegoraro
* fix(maestro): change approach on apply/delete kubernetes clusters
Signed-off-by: Luiz Pegoraro
---------
Signed-off-by: Luiz Pegoraro
* fix(maestro): re-added maestro stream. (#2190)
Signed-off-by: Luiz Pegoraro
* fix(maestro): fix monitor logic (#2191)
* fix(maestroStream): remove duplicate event publishing
Signed-off-by: Luiz Pegoraro
* fix(maestro): enhanced logic from monitor.
Signed-off-by: Luiz Pegoraro
---------
Signed-off-by: Luiz Pegoraro
* fix(maestro): fix iteration to be more performant and less error prone. (#2192)
Signed-off-by: Luiz Pegoraro
* Fix iteration (#2193)
* fix(maestro): fix iteration to be more performant and less error prone.
Signed-off-by: Luiz Pegoraro
* fix(maestro): fix nil reference error
Signed-off-by: Luiz Pegoraro
---------
Signed-off-by: Luiz Pegoraro
* Orb 694 tests otel migration (#2194)
* scenarios to make easier test otel migration
* fix(maestro): fix new nil reference error (#2195)
Signed-off-by: Luiz Pegoraro
* fix(maestro): fix extract id. (#2196)
Signed-off-by: Luiz Pegoraro
* fix(maestro): clean up code and create wait mechanism for apply deployments (#2197)
* fix(maestro): fix extract id.
Signed-off-by: Luiz Pegoraro
* fix(maestro): clean up code and enhanced verification of collector apply.
Signed-off-by: Luiz Pegoraro
* fix(maestro): fix method calls.
Signed-off-by: Luiz Pegoraro
* fix(maestro): fix method calls.
Signed-off-by: Luiz Pegoraro --------- Signed-off-by: Luiz Pegoraro * fix(maestro): change from pod, to deployment and added logs for matching items (#2198) Signed-off-by: Luiz Pegoraro * Small changes on README (#2199) * small changes * Update README.md * fix(redis): Add more logs (#2200) * fix(makefile): clean up docker-compose. Signed-off-by: Luiz Pegoraro * fix(redis): add debug logs Signed-off-by: Luiz Pegoraro * fix(makefile): remove sleep, just retry. Signed-off-by: Luiz Pegoraro --------- Signed-off-by: Luiz Pegoraro * [FIX] update kind manifests to 1.0.44 (#2203) * fix kind * fix kind * [fix] kind * tests to validate private image (#2202) * Revert "fix(redis): add debug logs" (#2205) * Revert "fix(redis): add debug logs" This reverts commit 44f4b98d92e89905015f3e2a0ec9961b8928f977. * fix(redis): re-add fix on iteration Signed-off-by: Luiz Pegoraro --------- Signed-off-by: Luiz Pegoraro * fix(redis): fixing how maestro and sinker handles errors in reading and handling event errors (#2206) * fix(redis): fix breaking out of reading routine because of failure. Signed-off-by: Luiz Pegoraro * fix(redis): fix breaking out of reading routine because of failure. Signed-off-by: Luiz Pegoraro * fix(redis): clean up. Signed-off-by: Luiz Pegoraro --------- Signed-off-by: Luiz Pegoraro * fix(maestro): fix acking messages on error. (#2207) Signed-off-by: Luiz Pegoraro * add prepare-helm first on run (#2209) * fix(maestro): new configuration handling and creation on kubernetes. (#2211) Signed-off-by: Luiz Pegoraro * fix(maestro): reverting back to file management due to throttling of … (#2212) * fix(maestro): reverting back to file management due to throttling of k8s api. Signed-off-by: Luiz Pegoraro * fix(maestro): fix reversion Signed-off-by: Luiz Pegoraro --------- Signed-off-by: Luiz Pegoraro * fix(maestro): remove idle validations. (#2214) Signed-off-by: Luiz Pegoraro * [ORB-673] Set default path for orb-agent.db (#2213) [ORB-673] Set default path for orb-agent.db (#2213) * WIP: hotfix general css and dataset list table spacings (#2208) * fix(maestro): rollback to complete deployment json (#2216) Signed-off-by: Luiz Pegoraro * [ORB-669] - Add more changes on README (#2215) * add more changes on README * fix gh organization to orb-community * fix(maestro): remove only changing status in active. to go back from errors like 429. (#2217) Signed-off-by: Luiz Pegoraro * fix(maestro): fix sink deletion propagation to collector and creation event status (#2218) * fix(maestro): remove only changing status in active. to go back from errors like 429. Signed-off-by: Luiz Pegoraro * fix(maestro): alternative fix to delete collectors and create collectors correctly. Signed-off-by: Luiz Pegoraro --------- Signed-off-by: Luiz Pegoraro Co-authored-by: Everton H. Taques <97463920+etaques@users.noreply.github.com> * rollback the changes on db file (#2219) * fix(sinker): fix active sink not updating (#2220) Signed-off-by: Luiz Pegoraro * fix(sinker): add activity in metric push in sinker, on change from idle/unknown to active (#2221) * fix(sinker): fix active sink not updating Signed-off-by: Luiz Pegoraro * Revert "fix(sinker): fix active sink not updating" This reverts commit 5470945d818126996db02e322ad14ff8c6d64f97. * fix(maestro): alternative fix on sink creation. Signed-off-by: Luiz Pegoraro * fix(maestro): updating timestamp. Signed-off-by: Luiz Pegoraro --------- Signed-off-by: Luiz Pegoraro Co-authored-by: Everton H. 
Taques <97463920+etaques@users.noreply.github.com> * [FIX] set last activity on redis if sink already is active (#2222) * fix active * disable go routines to test * Update streams.go * add logs * fix(maestro): add subgroup to sinker and fix old timestamped logs interfering with status (#2224) * fix(maestro): ignore old messages, set as active. Signed-off-by: Luiz Pegoraro * fix(sinker): added different group for sinker when otel is enabled. Signed-off-by: Luiz Pegoraro * fix(sinker): added different group for sinker when otel is enabled. Signed-off-by: Luiz Pegoraro --------- Signed-off-by: Luiz Pegoraro * fix(sinker): don't fetch logs older than 5 minutes. (#2225) Signed-off-by: Luiz Pegoraro * fix install kubectl (#2226) * fix install kubectl * Update Makefile * fix(maestro): fix deletion. (#2227) Signed-off-by: Luiz Pegoraro * feat(maestro): fix subscription (#2229) Signed-off-by: Luiz Pegoraro * [ORB-689] sinks status synchronization (#2231) * Update monitor.go * Update kubecontrol.go * change to 10 minutes to be idle * set to change if idle be active if have activity * back to active after idle * Update kubecontrol.go * increase idle time to 15 minutes * fix idle * fix sinker activity * add changes * changes * add changes * add changes * add changes * add changes * add changes * add changes * add changes * add changes * add changes * add changes * add changes * increasing timeout for test sink status (#2233) --------- Signed-off-by: Luiz Pegoraro Co-authored-by: manrodrigues Co-authored-by: Luiz Henrique Pegoraro Co-authored-by: Mariana Cavalcante Co-authored-by: Guilhermo Pazuch <1490938+gpazuch@users.noreply.github.com> --- .github/workflows/go-develop.yml | 2 +- .github/workflows/go-production.yml | 2 +- .gitignore | 3 + Makefile | 13 +- README.md | 33 +- agent/backend/pktvisor/scrape.go | 9 +- agent/otel/bridgeservice.go | 27 +- agent/otel/otlpmqttexporter/otlp.go | 5 +- agent/policies/types.go | 2 +- cmd/maestro/main.go | 62 +- cmd/migrate/main.go | 9 +- fleet/agent_group_service.go | 4 +- fleet/agent_group_service_test.go | 36 +- fleet/agent_groups.go | 2 +- fleet/agent_service.go | 3 + fleet/agent_service_test.go | 57 +- fleet/agents.go | 2 +- fleet/api/grpc/endpoint.go | 2 +- fleet/api/http/endpoint.go | 50 +- fleet/api/http/endpoint_test.go | 129 ++-- fleet/api/http/requests.go | 27 +- fleet/comms_test.go | 12 +- fleet/postgres/agent_groups.go | 14 +- fleet/postgres/agent_groups_test.go | 26 +- fleet/postgres/agents.go | 11 +- fleet/postgres/agents_test.go | 44 +- go.mod | 25 +- go.sum | 104 +++- kind/Chart.lock | 8 +- kind/Chart.yaml | 4 +- kind/README.md | 18 +- kind/values.yaml | 4 +- maestro/config/config_builder.go | 230 +++++++- maestro/config/config_builder_test.go | 2 +- maestro/config/types.go | 8 +- maestro/kubecontrol/config_parse.go | 102 ++++ maestro/kubecontrol/kubecontrol.go | 139 +++-- maestro/monitor/monitor.go | 301 ++++++++++ maestro/redis/consumer/events.go | 27 - maestro/redis/consumer/hashset.go | 157 ++++- maestro/redis/consumer/streams.go | 186 +++--- maestro/redis/events.go | 49 ++ maestro/service.go | 112 ++-- migrate/migration/m3_enable_otel_all_sinks.go | 113 ++++ pkg/errors/errors.go | 2 +- pkg/types/maps.go | 24 +- policies/postgres/policies.go | 4 - python-test/README.md | 21 +- .../check_agent_groups_details.md | 17 - ...ssible_cancel_operations_with_no_change.md | 17 - ...groups_on_agent_groups'_page_is_correct.md | 20 - .../create_agent_group_with_description.md | 12 - .../create_agent_group_with_duplicate_name.md | 13 - 
...e_agent_group_with_invalid_name_(regex).md | 15 - .../create_agent_group_with_multiple_tags.md | 12 - .../create_agent_group_with_one_tag.md | 12 - .../create_agent_group_without_description.md | 12 - .../create_agent_group_without_tag.md | 13 - .../edit_agent_group_description.md | 17 - ..._group_description_removing_description.md | 17 - ..._agent_group_name,_description_and_tags.md | 17 - .../agent_groups/edit_agent_group_name.md | 17 - .../edit_agent_group_name_removing_name.md | 17 - .../edit_agent_group_removing_tags.md | 17 - .../docs/agent_groups/edit_agent_group_tag.md | 18 - ...dit_agent_group_tags_to_subscribe_agent.md | 13 - ...t_agent_group_tags_to_unsubscribe_agent.md | 11 - ...n_agent_group_through_the_details_modal.md | 13 - .../remove_agent_group_using_correct_name.md | 16 - ...remove_agent_group_using_incorrect_name.md | 14 - .../agent_groups/test_agent_groups_filters.md | 22 - .../agent_groups/visualize_matching_agents.md | 10 - .../docs/agents/check_agent_details.md | 18 - ...ssible_cancel_operations_with_no_change.md | 16 - ..._total_agent_on_agents'_page_is_correct.md | 19 - .../create_agent_with_duplicate_name.md | 13 - .../create_agent_with_invalid_name_(regex).md | 15 - .../agents/create_agent_with_multiple_tags.md | 12 - .../docs/agents/create_agent_with_one_tag.md | 12 - .../docs/agents/create_agent_without_tags.md | 13 - python-test/docs/agents/edit_agent_name.md | 18 - .../docs/agents/edit_agent_name_and_tags.md | 17 - python-test/docs/agents/edit_agent_tag.md | 19 - ...edit_an_agent_through_the_details_modal.md | 14 - ...ert_tags_in_agents_created_without_tags.md | 17 - .../agents/remove_agent_using_correct_name.md | 14 - .../remove_agent_using_incorrect_name.md | 15 - .../run_two_orb_agents_on_different_ports.md | 8 - .../run_two_orb_agents_on_the_same_port.md | 9 - .../docs/agents/save_agent_without_tag.md | 16 - python-test/docs/agents/test_agent_filters.md | 19 - .../docs/datasets/check_datasets_details.md | 17 - ...ssible_cancel_operations_with_no_change.md | 14 - ...l_datasets_on_datasets'_page_is_correct.md | 20 - python-test/docs/datasets/create_dataset.md | 12 - ...reate_dataset_with_invalid_name_(regex).md | 15 - ...dit_a_dataset_through_the_details_modal.md | 13 - .../docs/datasets/edit_dataset_name.md | 17 - .../docs/datasets/edit_dataset_sink.md | 17 - .../remove_dataset_using_correct_name.md | 14 - .../remove_dataset_using_incorrect_name.md | 14 - .../docs/datasets/test_datasets_filter.md | 17 - python-test/docs/development_guide.md | 214 ------- python-test/docs/img/ORB-logo-ring.png | Bin 10902 -> 0 bytes python-test/docs/index.md | 187 ------ ...ion_to_group_after_editing_agent's_tags.md | 43 -- ...ith_policies_after_editing_agent's_tags.md | 40 -- ...tags_editing_tags_after_agent_provision.md | 17 - ...ags_editing_tags_before_agent_provision.md | 16 - ...tags_editing_tags_after_agent_provision.md | 15 - ...ags_editing_tags_before_agent_provision.md | 16 - ...tags_editing_tags_after_agent_provision.md | 16 - ...ags_editing_tags_before_agent_provision.md | 16 - ...tags_editing_tags_after_agent_provision.md | 15 - ...ags_editing_tags_before_agent_provision.md | 16 - .../integration/apply_multiple_policies.md | 38 -- .../docs/integration/apply_policy_twice.md | 18 - .../integration/change_sink_on_dataset.md | 20 - ...t_agent_name_and_apply_policies_to_then.md | 16 - ...ply_policies_to_group_matching_new_tags.md | 15 - .../multiple_agents_subscribed_to_a_group.md | 18 - .../provision_agent_after_group.md | 12 - 
.../provision_agent_before_group.md | 12 - python-test/docs/integration/remove_agent.md | 15 - .../integration/remove_agent_container.md | 23 - .../remove_agent_container_force.md | 23 - .../docs/integration/remove_all_datasets.md | 18 - .../docs/integration/remove_all_policies.md | 18 - .../docs/integration/remove_dataset.md | 16 - python-test/docs/integration/remove_group.md | 0 ...e_dataset_of_multiples_with_same_policy.md | 16 - .../remove_one_of_multiple_datasets.md | 18 - .../remove_one_of_multiple_policies.md | 20 - python-test/docs/integration/remove_policy.md | 17 - python-test/docs/integration/remove_sink.md | 0 .../docs/integration/reset_agent_remotely.md | 39 -- .../sink_active_while_scraping_metrics.md | 35 -- .../sink_error_invalid_credentials.md | 15 - .../docs/integration/sink_idle_30_minutes.md | 16 - ...groups_created_after_agent_provisioning.md | 12 - ...roups_created_before_agent_provisioning.md | 11 - ..._email_and_password_are_required_fields.md | 45 -- .../login/login_with_invalid_credentials.md | 45 -- .../login/login_with_valid_credentials.md | 15 - ..._password_with_registered_email_address.md | 14 - ...assword_with_unregistered_email_address.md | 14 - ...tered_account_using_registered_password.md | 16 - ...t_using_registered_password_and_company.md | 15 - ..._using_registered_password_and_username.md | 16 - ...egistered_password_username_and_company.md | 15 - ...red_account_using_unregistered_password.md | 14 - ...using_unregistered_password_and_company.md | 14 - ...sing_unregistered_password_and_username.md | 15 - ...egistered_password_username_and_company.md | 15 - ...with_invalid_password_and_invalid_email.md | 14 - ...t_with_invalid_password_and_valid_email.md | 14 - ...t_with_valid_password_and_invalid_email.md | 14 - ...unt_with_valid_password_and_valid_email.md | 15 - ...ssible_cancel_operations_with_no_change.md | 22 - ...l_policies_on_policies'_page_is_correct.md | 20 - .../docs/policies/check_policies_details.md | 15 - ...out_insert_new_name_and_1_with_new_name.md | 25 - ...reate_4_duplicated_policy_with_new_name.md | 20 - ...ted_dhcp_policy_without_insert_new_name.md | 20 - ...ated_dns_policy_without_insert_new_name.md | 20 - ...ated_net_policy_without_insert_new_name.md | 20 - .../create_policy_with_description.md | 12 - .../create_policy_with_dhcp_handler.md | 10 - .../create_policy_with_dns_handler.md | 54 -- .../create_policy_with_duplicate_name.md | 13 - ...create_policy_with_invalid_name_(regex).md | 15 - .../create_policy_with_multiple_handlers.md | 12 - .../create_policy_with_net_handler.md | 12 - ...create_policy_with_no_agent_provisioned.md | 12 - .../create_policy_without_description.md | 12 - ...edit_a_policy_through_the_details_modal.md | 13 - .../edit_policy_bpf_filter_expression.md | 18 - .../docs/policies/edit_policy_description.md | 17 - .../docs/policies/edit_policy_handler.md | 18 - .../edit_policy_host_specification.md | 18 - python-test/docs/policies/edit_policy_name.md | 17 - .../policies/edit_policy_only_qname_suffix.md | 17 - .../docs/policies/edit_policy_only_rcode.md | 17 - .../docs/policies/edit_policy_pcap_source.md | 17 - .../remove_policy_using_correct_name.md | 16 - .../remove_policy_using_incorrect_name.md | 14 - .../docs/policies/test_policy_filters.md | 19 - python-test/docs/sanity.md | 165 ------ ...ssible_cancel_operations_with_no_change.md | 24 - ..._password_are_required_to_create_a_sink.md | 49 -- ...f_total_sinks_on_sinks'_page_is_correct.md | 21 - python-test/docs/sinks/check_sink_details.md | 19 - 
.../sinks/create_sink_with_description.md | 12 - .../sinks/create_sink_with_duplicate_name.md | 14 - .../create_sink_with_invalid_name_(regex).md | 14 - .../sinks/create_sink_with_multiple_tags.md | 12 - .../docs/sinks/create_sink_with_tags.md | 12 - .../sinks/create_sink_without_description.md | 12 - .../docs/sinks/create_sink_without_tags.md | 12 - .../edit_a_sink_through_the_details_modal.md | 13 - .../docs/sinks/edit_sink_description.md | 19 - python-test/docs/sinks/edit_sink_name.md | 18 - python-test/docs/sinks/edit_sink_password.md | 18 - .../docs/sinks/edit_sink_remote_host.md | 18 - python-test/docs/sinks/edit_sink_tags.md | 18 - python-test/docs/sinks/edit_sink_username.md | 18 - .../sinks/remove_sink_using_correct_name.md | 15 - .../sinks/remove_sink_using_incorrect_name.md | 15 - python-test/docs/sinks/test_sink_filters.md | 23 - python-test/docs/smoke.md | 113 ---- python-test/features/integration.feature | 84 ++- .../features/integration_config_file.feature | 108 ++-- python-test/features/metrics.feature | 550 ++++++++++++++++-- python-test/features/migration.feature | 93 +++ python-test/features/policies.feature | 12 - .../steps/control_plane_agent_groups.py | 2 +- .../features/steps/control_plane_agents.py | 27 + .../features/steps/control_plane_policies.py | 13 +- .../features/steps/control_plane_sink.py | 63 +- python-test/features/steps/local_agent.py | 39 +- python-test/features/steps/metrics.py | 140 ++++- sinker/backend/pktvisor/pktvisor.go | 2 + sinker/config/repo.go | 5 + sinker/config/types.go | 3 + sinker/config_state_check.go | 12 +- sinker/message_handler.go | 9 +- sinker/otel/bridgeservice/bridge.go | 53 +- sinker/redis/consumer/streams.go | 20 +- sinker/redis/producer/streams.go | 52 +- sinker/redis/sinker.go | 83 ++- sinks/api/grpc/client.go | 2 + sinks/api/grpc/endpoint.go | 2 + sinks/api/grpc/request.go | 1 + sinks/api/grpc/response.go | 1 + sinks/api/grpc/server.go | 1 + sinks/api/http/endpoint_test.go | 20 +- sinks/pb/sinks.pb.go | 55 +- sinks/pb/sinks.proto | 2 + sinks/postgres/sinks.go | 8 +- sinks/redis/consumer/streams.go | 17 +- sinks/redis/producer/streams.go | 27 +- sinks/sinks_service.go | 26 +- sinks/sinks_service_test.go | 14 +- .../fleet/agents/key/agent.key.component.ts | 2 + .../pages/sinks/add/sink.add.component.html | 12 +- .../app/pages/sinks/add/sink.add.component.ts | 7 - .../agent-backends.component.scss | 4 + .../agent-provisioning.component.ts | 2 + .../policy-datasets.component.scss | 1 + .../policy-datasets.component.ts | 25 +- 250 files changed, 3000 insertions(+), 4192 deletions(-) create mode 100644 maestro/kubecontrol/config_parse.go create mode 100644 maestro/monitor/monitor.go delete mode 100644 maestro/redis/consumer/events.go create mode 100644 maestro/redis/events.go create mode 100644 migrate/migration/m3_enable_otel_all_sinks.go delete mode 100644 python-test/docs/agent_groups/check_agent_groups_details.md delete mode 100644 python-test/docs/agent_groups/check_if_is_possible_cancel_operations_with_no_change.md delete mode 100644 python-test/docs/agent_groups/check_if_total_agent_groups_on_agent_groups'_page_is_correct.md delete mode 100644 python-test/docs/agent_groups/create_agent_group_with_description.md delete mode 100644 python-test/docs/agent_groups/create_agent_group_with_duplicate_name.md delete mode 100644 python-test/docs/agent_groups/create_agent_group_with_invalid_name_(regex).md delete mode 100644 python-test/docs/agent_groups/create_agent_group_with_multiple_tags.md delete mode 100644 
python-test/docs/agent_groups/create_agent_group_with_one_tag.md delete mode 100644 python-test/docs/agent_groups/create_agent_group_without_description.md delete mode 100644 python-test/docs/agent_groups/create_agent_group_without_tag.md delete mode 100644 python-test/docs/agent_groups/edit_agent_group_description.md delete mode 100644 python-test/docs/agent_groups/edit_agent_group_description_removing_description.md delete mode 100644 python-test/docs/agent_groups/edit_agent_group_name,_description_and_tags.md delete mode 100644 python-test/docs/agent_groups/edit_agent_group_name.md delete mode 100644 python-test/docs/agent_groups/edit_agent_group_name_removing_name.md delete mode 100644 python-test/docs/agent_groups/edit_agent_group_removing_tags.md delete mode 100644 python-test/docs/agent_groups/edit_agent_group_tag.md delete mode 100644 python-test/docs/agent_groups/edit_agent_group_tags_to_subscribe_agent.md delete mode 100644 python-test/docs/agent_groups/edit_agent_group_tags_to_unsubscribe_agent.md delete mode 100644 python-test/docs/agent_groups/edit_an_agent_group_through_the_details_modal.md delete mode 100644 python-test/docs/agent_groups/remove_agent_group_using_correct_name.md delete mode 100644 python-test/docs/agent_groups/remove_agent_group_using_incorrect_name.md delete mode 100644 python-test/docs/agent_groups/test_agent_groups_filters.md delete mode 100644 python-test/docs/agent_groups/visualize_matching_agents.md delete mode 100644 python-test/docs/agents/check_agent_details.md delete mode 100644 python-test/docs/agents/check_if_is_possible_cancel_operations_with_no_change.md delete mode 100644 python-test/docs/agents/check_if_total_agent_on_agents'_page_is_correct.md delete mode 100644 python-test/docs/agents/create_agent_with_duplicate_name.md delete mode 100644 python-test/docs/agents/create_agent_with_invalid_name_(regex).md delete mode 100644 python-test/docs/agents/create_agent_with_multiple_tags.md delete mode 100644 python-test/docs/agents/create_agent_with_one_tag.md delete mode 100644 python-test/docs/agents/create_agent_without_tags.md delete mode 100644 python-test/docs/agents/edit_agent_name.md delete mode 100644 python-test/docs/agents/edit_agent_name_and_tags.md delete mode 100644 python-test/docs/agents/edit_agent_tag.md delete mode 100644 python-test/docs/agents/edit_an_agent_through_the_details_modal.md delete mode 100644 python-test/docs/agents/insert_tags_in_agents_created_without_tags.md delete mode 100644 python-test/docs/agents/remove_agent_using_correct_name.md delete mode 100644 python-test/docs/agents/remove_agent_using_incorrect_name.md delete mode 100644 python-test/docs/agents/run_two_orb_agents_on_different_ports.md delete mode 100644 python-test/docs/agents/run_two_orb_agents_on_the_same_port.md delete mode 100644 python-test/docs/agents/save_agent_without_tag.md delete mode 100644 python-test/docs/agents/test_agent_filters.md delete mode 100644 python-test/docs/datasets/check_datasets_details.md delete mode 100644 python-test/docs/datasets/check_if_is_possible_cancel_operations_with_no_change.md delete mode 100644 python-test/docs/datasets/check_if_total_datasets_on_datasets'_page_is_correct.md delete mode 100644 python-test/docs/datasets/create_dataset.md delete mode 100644 python-test/docs/datasets/create_dataset_with_invalid_name_(regex).md delete mode 100644 python-test/docs/datasets/edit_a_dataset_through_the_details_modal.md delete mode 100644 python-test/docs/datasets/edit_dataset_name.md delete mode 100644 
python-test/docs/datasets/edit_dataset_sink.md delete mode 100644 python-test/docs/datasets/remove_dataset_using_correct_name.md delete mode 100644 python-test/docs/datasets/remove_dataset_using_incorrect_name.md delete mode 100644 python-test/docs/datasets/test_datasets_filter.md delete mode 100644 python-test/docs/development_guide.md delete mode 100644 python-test/docs/img/ORB-logo-ring.png delete mode 100644 python-test/docs/index.md delete mode 100644 python-test/docs/integration/agent_subscription_to_group_after_editing_agent's_tags.md delete mode 100644 python-test/docs/integration/agent_subscription_to_group_with_policies_after_editing_agent's_tags.md delete mode 100644 python-test/docs/integration/agent_subscription_to_group_with_policies_after_editing_agent's_tags_and_agent_group's_tags_editing_tags_after_agent_provision.md delete mode 100644 python-test/docs/integration/agent_subscription_to_group_with_policies_after_editing_agent's_tags_and_agent_group's_tags_editing_tags_before_agent_provision.md delete mode 100644 python-test/docs/integration/agent_subscription_to_group_with_policies_after_editing_agent_group's_tags_editing_tags_after_agent_provision.md delete mode 100644 python-test/docs/integration/agent_subscription_to_group_with_policies_after_editing_agent_group's_tags_editing_tags_before_agent_provision.md delete mode 100644 python-test/docs/integration/agent_unsubscription_to_group_with_policies_after_editing_agent's_tags_and_agent_group's_tags_editing_tags_after_agent_provision.md delete mode 100644 python-test/docs/integration/agent_unsubscription_to_group_with_policies_after_editing_agent's_tags_and_agent_group's_tags_editing_tags_before_agent_provision.md delete mode 100644 python-test/docs/integration/agent_unsubscription_to_group_with_policies_after_editing_agent_group's_tags_editing_tags_after_agent_provision.md delete mode 100644 python-test/docs/integration/agent_unsubscription_to_group_with_policies_after_editing_agent_group's_tags_editing_tags_before_agent_provision.md delete mode 100644 python-test/docs/integration/apply_multiple_policies.md delete mode 100644 python-test/docs/integration/apply_policy_twice.md delete mode 100644 python-test/docs/integration/change_sink_on_dataset.md delete mode 100644 python-test/docs/integration/edit_agent_name_and_apply_policies_to_then.md delete mode 100644 python-test/docs/integration/insert_tags_in_agents_created_without_tags_and_apply_policies_to_group_matching_new_tags.md delete mode 100644 python-test/docs/integration/multiple_agents_subscribed_to_a_group.md delete mode 100644 python-test/docs/integration/provision_agent_after_group.md delete mode 100644 python-test/docs/integration/provision_agent_before_group.md delete mode 100644 python-test/docs/integration/remove_agent.md delete mode 100644 python-test/docs/integration/remove_agent_container.md delete mode 100644 python-test/docs/integration/remove_agent_container_force.md delete mode 100644 python-test/docs/integration/remove_all_datasets.md delete mode 100644 python-test/docs/integration/remove_all_policies.md delete mode 100644 python-test/docs/integration/remove_dataset.md delete mode 100644 python-test/docs/integration/remove_group.md delete mode 100644 python-test/docs/integration/remove_one_dataset_of_multiples_with_same_policy.md delete mode 100644 python-test/docs/integration/remove_one_of_multiple_datasets.md delete mode 100644 python-test/docs/integration/remove_one_of_multiple_policies.md delete mode 100644 
python-test/docs/integration/remove_policy.md delete mode 100644 python-test/docs/integration/remove_sink.md delete mode 100644 python-test/docs/integration/reset_agent_remotely.md delete mode 100644 python-test/docs/integration/sink_active_while_scraping_metrics.md delete mode 100644 python-test/docs/integration/sink_error_invalid_credentials.md delete mode 100644 python-test/docs/integration/sink_idle_30_minutes.md delete mode 100644 python-test/docs/integration/subscribe_an_agent_to_multiple_groups_created_after_agent_provisioning.md delete mode 100644 python-test/docs/integration/subscribe_an_agent_to_multiple_groups_created_before_agent_provisioning.md delete mode 100644 python-test/docs/login/check_if_email_and_password_are_required_fields.md delete mode 100644 python-test/docs/login/login_with_invalid_credentials.md delete mode 100644 python-test/docs/login/login_with_valid_credentials.md delete mode 100644 python-test/docs/login/request_password_with_registered_email_address.md delete mode 100644 python-test/docs/login/request_password_with_unregistered_email_address.md delete mode 100644 python-test/docs/login/request_registration_of_a_registered_account_using_registered_password.md delete mode 100644 python-test/docs/login/request_registration_of_a_registered_account_using_registered_password_and_company.md delete mode 100644 python-test/docs/login/request_registration_of_a_registered_account_using_registered_password_and_username.md delete mode 100644 python-test/docs/login/request_registration_of_a_registered_account_using_registered_password_username_and_company.md delete mode 100644 python-test/docs/login/request_registration_of_a_registered_account_using_unregistered_password.md delete mode 100644 python-test/docs/login/request_registration_of_a_registered_account_using_unregistered_password_and_company.md delete mode 100644 python-test/docs/login/request_registration_of_a_registered_account_using_unregistered_password_and_username.md delete mode 100644 python-test/docs/login/request_registration_of_a_registered_account_using_unregistered_password_username_and_company.md delete mode 100644 python-test/docs/login/request_registration_of_an_unregistered_account_with_invalid_password_and_invalid_email.md delete mode 100644 python-test/docs/login/request_registration_of_an_unregistered_account_with_invalid_password_and_valid_email.md delete mode 100644 python-test/docs/login/request_registration_of_an_unregistered_account_with_valid_password_and_invalid_email.md delete mode 100644 python-test/docs/login/request_registration_of_an_unregistered_account_with_valid_password_and_valid_email.md delete mode 100644 python-test/docs/policies/check_if_is_possible_cancel_operations_with_no_change.md delete mode 100644 python-test/docs/policies/check_if_total_policies_on_policies'_page_is_correct.md delete mode 100644 python-test/docs/policies/check_policies_details.md delete mode 100644 python-test/docs/policies/create_3_duplicated_dns_policy_without_insert_new_name_and_1_with_new_name.md delete mode 100644 python-test/docs/policies/create_4_duplicated_policy_with_new_name.md delete mode 100644 python-test/docs/policies/create_duplicated_dhcp_policy_without_insert_new_name.md delete mode 100644 python-test/docs/policies/create_duplicated_dns_policy_without_insert_new_name.md delete mode 100644 python-test/docs/policies/create_duplicated_net_policy_without_insert_new_name.md delete mode 100644 python-test/docs/policies/create_policy_with_description.md delete mode 100644 
python-test/docs/policies/create_policy_with_dhcp_handler.md delete mode 100644 python-test/docs/policies/create_policy_with_dns_handler.md delete mode 100644 python-test/docs/policies/create_policy_with_duplicate_name.md delete mode 100644 python-test/docs/policies/create_policy_with_invalid_name_(regex).md delete mode 100644 python-test/docs/policies/create_policy_with_multiple_handlers.md delete mode 100644 python-test/docs/policies/create_policy_with_net_handler.md delete mode 100644 python-test/docs/policies/create_policy_with_no_agent_provisioned.md delete mode 100644 python-test/docs/policies/create_policy_without_description.md delete mode 100644 python-test/docs/policies/edit_a_policy_through_the_details_modal.md delete mode 100644 python-test/docs/policies/edit_policy_bpf_filter_expression.md delete mode 100644 python-test/docs/policies/edit_policy_description.md delete mode 100644 python-test/docs/policies/edit_policy_handler.md delete mode 100644 python-test/docs/policies/edit_policy_host_specification.md delete mode 100644 python-test/docs/policies/edit_policy_name.md delete mode 100644 python-test/docs/policies/edit_policy_only_qname_suffix.md delete mode 100644 python-test/docs/policies/edit_policy_only_rcode.md delete mode 100644 python-test/docs/policies/edit_policy_pcap_source.md delete mode 100644 python-test/docs/policies/remove_policy_using_correct_name.md delete mode 100644 python-test/docs/policies/remove_policy_using_incorrect_name.md delete mode 100644 python-test/docs/policies/test_policy_filters.md delete mode 100644 python-test/docs/sanity.md delete mode 100644 python-test/docs/sinks/check_if_is_possible_cancel_operations_with_no_change.md delete mode 100644 python-test/docs/sinks/check_if_remote_host,_username_and_password_are_required_to_create_a_sink.md delete mode 100644 python-test/docs/sinks/check_if_total_sinks_on_sinks'_page_is_correct.md delete mode 100644 python-test/docs/sinks/check_sink_details.md delete mode 100644 python-test/docs/sinks/create_sink_with_description.md delete mode 100644 python-test/docs/sinks/create_sink_with_duplicate_name.md delete mode 100644 python-test/docs/sinks/create_sink_with_invalid_name_(regex).md delete mode 100644 python-test/docs/sinks/create_sink_with_multiple_tags.md delete mode 100644 python-test/docs/sinks/create_sink_with_tags.md delete mode 100644 python-test/docs/sinks/create_sink_without_description.md delete mode 100644 python-test/docs/sinks/create_sink_without_tags.md delete mode 100644 python-test/docs/sinks/edit_a_sink_through_the_details_modal.md delete mode 100644 python-test/docs/sinks/edit_sink_description.md delete mode 100644 python-test/docs/sinks/edit_sink_name.md delete mode 100644 python-test/docs/sinks/edit_sink_password.md delete mode 100644 python-test/docs/sinks/edit_sink_remote_host.md delete mode 100644 python-test/docs/sinks/edit_sink_tags.md delete mode 100644 python-test/docs/sinks/edit_sink_username.md delete mode 100644 python-test/docs/sinks/remove_sink_using_correct_name.md delete mode 100644 python-test/docs/sinks/remove_sink_using_incorrect_name.md delete mode 100644 python-test/docs/sinks/test_sink_filters.md delete mode 100644 python-test/docs/smoke.md create mode 100644 python-test/features/migration.feature diff --git a/.github/workflows/go-develop.yml b/.github/workflows/go-develop.yml index 8a92ffc0b..a45abb34c 100644 --- a/.github/workflows/go-develop.yml +++ b/.github/workflows/go-develop.yml @@ -101,7 +101,7 @@ jobs: if: ${{ needs.prebuild.outputs.docs == 'true' && 
github.event_name != 'pull_request' }} with: token: ${{ secrets.GH_ORB_ACCESS_TOKEN }} - repository: ns1labs/orb-website + repository: orb-community/orb-website event-type: build-docs client-payload: '{"branch_name": "main"}' diff --git a/.github/workflows/go-production.yml b/.github/workflows/go-production.yml index 5b405437c..9fce66047 100644 --- a/.github/workflows/go-production.yml +++ b/.github/workflows/go-production.yml @@ -479,7 +479,7 @@ jobs: with: branch: production header: ":first_place_medal: :orb: *Deployed new features in orb.live* :orb: :rocket: :tada:" - github_repo: ns1labs/orb + github_repo: orb-community/orb slack_channel: C041B9204CF # orb netdev slack channel ID slack_api_token: ${{ secrets.SLACK_APP_TOKEN }} diff --git a/.gitignore b/.gitignore index 0345ce02d..4cccce3b1 100644 --- a/.gitignore +++ b/.gitignore @@ -32,3 +32,6 @@ python-test/behave_orb test_agent_name* docker/otel-collector-config.yaml !docker/otel-collector-config.yaml.tpl + +kind/* +!kind/README.md diff --git a/Makefile b/Makefile index 0768f7690..d99c4deb2 100644 --- a/Makefile +++ b/Makefile @@ -135,9 +135,9 @@ install-helm: install-kubectl: cd /tmp && \ - curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" && \ - chmod a+x /kubectl && \ - sudo mv ./kubectl /user/local/bin/kubectl + curl -LO "https://dl.k8s.io/release/v1.22.1/bin/linux/amd64/kubectl" && \ + chmod a+x ./kubectl && \ + sudo mv ./kubectl /usr/local/bin/kubectl install-docker: cd /tmp @@ -156,7 +156,7 @@ install-k9s: prepare-helm: cd ./kind/ && \ helm repo add jaegertracing https://jaegertracing.github.io/helm-charts && \ - helm repo add ns1labs-orb https://ns1labs.github.io/orb-helm/ && \ + helm repo add orb-community https://orb-community.github.io/orb-helm/ && \ helm dependency build kind-create-all: kind-create-cluster kind-install-orb @@ -164,7 +164,7 @@ kind-create-all: kind-create-cluster kind-install-orb kind-upgrade-all: kind-load-images kind-upgrade-orb kind-create-cluster: - kind create cluster --image kindest/node:v1.23.0 --config=./kind/config.yaml + kind create cluster --image kindest/node:v1.22.15 --config=./kind/config.yaml kind-delete-cluster: kind delete cluster @@ -189,6 +189,7 @@ kind-install-orb: kind-upgrade-orb: helm upgrade -n orb kind-orb ./kind + kubectl rollout restart deployment -n orb kind-delete-orb: kubectl delete -f ./kind/nginx.yaml @@ -200,7 +201,7 @@ kind-delete-orb: # -run: kind-create-all +run: prepare-helm kind-create-all stop: kind-delete-orb kind-delete-cluster diff --git a/README.md b/README.md index cb01bb436..177e4fca8 100644 --- a/README.md +++ b/README.md @@ -1,13 +1,11 @@ Orb -Orb UI Preview +Orb UI Preview -[![Total alerts](https://img.shields.io/lgtm/alerts/g/ns1labs/orb.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/ns1labs/orb/alerts/) -[![Go Report Card](https://goreportcard.com/badge/github.com/ns1labs/orb)](https://goreportcard.com/report/github.com/ns1labs/orb) -[![CodeCov](https://codecov.io/gh/ns1labs/orb/branch/develop/graph/badge.svg)](https://app.codecov.io/gh/ns1labs/orb/tree/develop) -[![Language grade: JavaScript](https://img.shields.io/lgtm/grade/javascript/g/ns1labs/orb.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/ns1labs/orb/context:javascript) -**Orb** is a new kind of observability platform that helps operators, developers, and end users understand their -networks, distributed applications, and traffic flows in real time. 
+[![Go Report Card](https://goreportcard.com/badge/github.com/ns1labs/orb)](https://goreportcard.com/report/github.com/orb-community/orb) +[![CodeCov](https://codecov.io/gh/ns1labs/orb/branch/develop/graph/badge.svg)](https://app.codecov.io/gh/orb-community/orb/tree/develop) + +**Orb** is a modern network observability platform built to provide critical visibility into increasingly complex and distributed networks. It can analyze network traffic, run synthetic network probes, and connect the resulting telemetry directly to your existing observability stacks with OpenTelemetry. Orb differentiates from other solutions by pushing analysis close to the traffic sources (reducing inactionable metrics and processing costs), and allows for dynamic reconfiguration of remote agents in real time. Ready to dive in? See [orb.community](https://orb.community) for [installation instructions](https://orb.community/documentation/install/). @@ -25,7 +23,7 @@ resulting in light-weight, actionable metrics. Based on the [pktvisor observability agent](https://pktvisor.dev), Orb's goal is to push analysis to the edge, where high resolution data can be analysed in real time without the need to send raw data to a central location for batch processing. -[Current analysis](https://github.com/ns1labs/pktvisor/wiki/Current-Metrics) focuses on L2-L3 Network, DNS, and DHCP +[Current analysis](https://github.com/orb-community/pktvisor/wiki/Current-Metrics) focuses on L2-L3 Network, DNS, and DHCP with more analyzers in the works. ## Realtime Agent Orchestration @@ -36,9 +34,7 @@ observability [policies](https://orb.community/about/#policies) designed to prec are grouped and addressed based on [tags](https://orb.community/about/#agent-group). ## Flexible Integration With Modern Observability Stacks - -Orb was built to integrate with modern observability stacks, supporting [Prometheus](https://prometheus.io/) natively -and designed to support arbitrary [sinks](https://orb.community/about/#sinks) in the future. Collection and sinking of the +Orb was built to integrate with modern observability stacks, supporting [OpenTelemetry](https://opentelemetry.io/) natively. Collection and sinking of the metrics from the agents is included; there is no need to run additional data collection pipelines for Orb metrics. ## Portal and REST API Included @@ -55,18 +51,17 @@ avoiding vendor lock-in. *** -# Backed by NS1 +# Backed by NetBox Labs -**Orb** was born at [NS1 Labs](https://ns1.com/labs), where we're committed to +**Orb** was born at [NetBox Labs](https://netboxlabs.com/), where we're committed to making [open source, dynamic edge observability a reality](https://ns1.com/blog/orb-a-new-paradigm-for-dynamic-edge-observability) . 
*** * [Installation Instructions](https://orb.community/documentation/install/) -* [View our Wiki](https://github.com/ns1labs/orb/wiki) for technical and architectural information -* [File an issue](https://github.com/ns1labs/orb/issues/new) -* Follow our [public work board](https://github.com/ns1labs/orb/projects/2) -* Start a [Discussion](https://github.com/ns1labs/orb/discussions) -* [Join us on Slack](https://join.slack.com/t/ns1labs/shared_invite/zt-qqsm5cb4-9fsq1xa~R3h~nX6W0sJzmA) -* Send mail to [info@pktvisor.dev](mailto:info@pktvisor.dev) +* [View our Wiki](https://github.com/orb-community/orb/wiki) for technical and architectural information +* [File an issue](https://github.com/orb-community/orb/issues/new) +* Start a [Discussion](https://github.com/orb-community/orb/discussions) +* [Join us on Slack](https://netdev.chat/) +* Send mail to [orb@netboxlabs.com](mailto:orb@netboxlabs.com) diff --git a/agent/backend/pktvisor/scrape.go b/agent/backend/pktvisor/scrape.go index cee984a76..de00f8bcc 100644 --- a/agent/backend/pktvisor/scrape.go +++ b/agent/backend/pktvisor/scrape.go @@ -35,9 +35,9 @@ func (p *pktvisorBackend) scrapeMetrics(period uint) (map[string]interface{}, er return metrics, nil } -func (p *pktvisorBackend) createOtlpMqttExporter(ctx context.Context) (component.MetricsExporter, error) { +func (p *pktvisorBackend) createOtlpMqttExporter(ctx context.Context, cancelFunc context.CancelFunc) (component.MetricsExporter, error) { - bridgeService := otel.NewBridgeService(&p.policyRepo, p.agentTags) + bridgeService := otel.NewBridgeService(ctx, &p.policyRepo, p.agentTags) if p.mqttClient != nil { cfg := otlpmqttexporter.CreateConfigClient(p.mqttClient, p.otlpMetricsTopic, p.pktvisorVersion, bridgeService) set := otlpmqttexporter.CreateDefaultSettings(p.logger) @@ -168,7 +168,7 @@ func (p *pktvisorBackend) scrapeOpenTelemetry(ctx context.Context) { if p.mqttClient != nil { if !ok { var errStartExp error - p.exporter[policyID], errStartExp = p.createOtlpMqttExporter(exeCtx) + p.exporter[policyID], errStartExp = p.createOtlpMqttExporter(exeCtx, execCancelF) if errStartExp != nil { p.logger.Error("failed to create a exporter", zap.Error(err)) return @@ -204,6 +204,9 @@ func (p *pktvisorBackend) scrapeOpenTelemetry(ctx context.Context) { } } select { + case <-exeCtx.Done(): + ctx.Done() + p.cancelFunc() case <-ctx.Done(): err := p.exporter[policyID].Shutdown(exeCtx) if err != nil { diff --git a/agent/otel/bridgeservice.go b/agent/otel/bridgeservice.go index 2a625551d..d260ca879 100644 --- a/agent/otel/bridgeservice.go +++ b/agent/otel/bridgeservice.go @@ -1,12 +1,14 @@ package otel import ( + "context" "github.com/ns1labs/orb/agent/policies" "strings" ) type AgentBridgeService interface { RetrieveAgentInfoByPolicyName(policyName string) (*AgentDataPerPolicy, error) + NotifyAgentDisconnection(ctx context.Context, err error) } type AgentDataPerPolicy struct { @@ -15,21 +17,23 @@ type AgentDataPerPolicy struct { AgentTags map[string]string } -var _ AgentBridgeService = (*bridgeService)(nil) +var _ AgentBridgeService = (*BridgeService)(nil) -type bridgeService struct { - policyRepo policies.PolicyRepo - AgentTags map[string]string +type BridgeService struct { + bridgeContext context.Context + policyRepo policies.PolicyRepo + AgentTags map[string]string } -func NewBridgeService(policyRepo *policies.PolicyRepo, agentTags map[string]string) *bridgeService { - return &bridgeService{ - policyRepo: *policyRepo, - AgentTags: agentTags, +func NewBridgeService(ctx context.Context, policyRepo 
*policies.PolicyRepo, agentTags map[string]string) *BridgeService { + return &BridgeService{ + bridgeContext: ctx, + policyRepo: *policyRepo, + AgentTags: agentTags, } } -func (b *bridgeService) RetrieveAgentInfoByPolicyName(policyName string) (*AgentDataPerPolicy, error) { +func (b *BridgeService) RetrieveAgentInfoByPolicyName(policyName string) (*AgentDataPerPolicy, error) { pData, err := b.policyRepo.GetByName(policyName) if err != nil { return nil, err @@ -40,3 +44,8 @@ func (b *bridgeService) RetrieveAgentInfoByPolicyName(policyName string) (*Agent AgentTags: b.AgentTags, }, nil } + +func (b *BridgeService) NotifyAgentDisconnection(ctx context.Context, err error) { + ctx.Done() + b.bridgeContext.Done() +} diff --git a/agent/otel/otlpmqttexporter/otlp.go b/agent/otel/otlpmqttexporter/otlp.go index f4eaf901b..9b2020b5c 100644 --- a/agent/otel/otlpmqttexporter/otlp.go +++ b/agent/otel/otlpmqttexporter/otlp.go @@ -230,7 +230,7 @@ func (e *exporter) pushMetrics(ctx context.Context, md pmetric.Metrics) error { e.logger.Info("request metrics count per policyID", zap.String("policyID", e.policyID), zap.Int("metric_count", md.MetricCount())) err = e.export(ctx, e.config.MetricsTopic, request) if err != nil { - defer ctx.Done() + ctx.Done() return err } return err @@ -240,11 +240,12 @@ func (e *exporter) pushLogs(_ context.Context, _ plog.Logs) error { return fmt.Errorf("not implemented") } -func (e *exporter) export(_ context.Context, metricsTopic string, request []byte) error { +func (e *exporter) export(ctx context.Context, metricsTopic string, request []byte) error { compressedPayload := e.compressBrotli(request) c := *e.config.Client if token := c.Publish(metricsTopic, 1, false, compressedPayload); token.Wait() && token.Error() != nil { e.logger.Error("error sending metrics RPC", zap.String("topic", metricsTopic), zap.Error(token.Error())) + e.config.OrbAgentService.NotifyAgentDisconnection(ctx, token.Error()) return token.Error() } e.logger.Info("scraped and published metrics", zap.String("topic", metricsTopic), zap.Int("payload_size_b", len(request)), zap.Int("compressed_payload_size_b", len(compressedPayload))) diff --git a/agent/policies/types.go b/agent/policies/types.go index 62f7be29f..a7ead2476 100644 --- a/agent/policies/types.go +++ b/agent/policies/types.go @@ -50,7 +50,7 @@ var policyStateMap = [...]string{ "running", "failed_to_apply", "offline", - "NoTapMatch", + "no_tap_match", } var policyStateRevMap = map[string]PolicyState{ diff --git a/cmd/maestro/main.go b/cmd/maestro/main.go index 9c195a616..05315bbbe 100644 --- a/cmd/maestro/main.go +++ b/cmd/maestro/main.go @@ -11,12 +11,6 @@ package main import ( "context" "fmt" - sinksgrpc "github.com/ns1labs/orb/sinks/api/grpc" - "github.com/opentracing/opentracing-go" - jconfig "github.com/uber/jaeger-client-go/config" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/credentials/insecure" "io" "os" "os/signal" @@ -25,6 +19,14 @@ import ( "syscall" "time" + sinksgrpc "github.com/ns1labs/orb/sinks/api/grpc" + "github.com/opentracing/opentracing-go" + "github.com/spf13/viper" + jconfig "github.com/uber/jaeger-client-go/config" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" + "github.com/ns1labs/orb/maestro" "github.com/ns1labs/orb/pkg/config" "go.uber.org/zap" @@ -41,7 +43,8 @@ const ( func main() { - esCfg := config.LoadEsConfig(envPrefix) + streamEsCfg := loadStreamEsConfig(envPrefix) + sinkerEsCfg := 
loadSinkerEsConfig(envPrefix) svcCfg := config.LoadBaseServiceConfig(envPrefix, httpPort) jCfg := config.LoadJaegerConfig(envPrefix) sinksGRPCCfg := config.LoadGRPCConfig("orb", "sinks") @@ -71,14 +74,20 @@ func main() { _ = logger.Sync() }(logger) log := logger.Sugar() - esClient := connectToRedis(esCfg.URL, esCfg.Pass, esCfg.DB, logger) + streamEsClient := connectToRedis(streamEsCfg.URL, streamEsCfg.Pass, streamEsCfg.DB, logger) defer func(esClient *r.Client) { err := esClient.Close() if err != nil { return } - }(esClient) - + }(streamEsClient) + sinkerEsClient := connectToRedis(sinkerEsCfg.URL, sinkerEsCfg.Pass, sinkerEsCfg.DB, logger) + defer func(esClient *r.Client) { + err := esClient.Close() + if err != nil { + return + } + }(sinkerEsClient) tracer, tracerCloser := initJaeger(svcName, jCfg.URL, logger) defer func(tracerCloser io.Closer) { err := tracerCloser.Close() @@ -102,7 +111,7 @@ func main() { sinksGRPCClient := sinksgrpc.NewClient(tracer, sinksGRPCConn, sinksGRPCTimeout, logger) otelCfg := config.LoadOtelConfig(envPrefix) - svc := maestro.NewMaestroService(logger, esClient, sinksGRPCClient, esCfg, otelCfg) + svc := maestro.NewMaestroService(logger, streamEsClient, sinkerEsClient, sinksGRPCClient, streamEsCfg, otelCfg) errs := make(chan error, 2) mainContext, mainCancelFunction := context.WithCancel(context.Background()) @@ -189,3 +198,34 @@ func connectToRedis(redisURL, redisPass, redisDB string, logger *zap.Logger) *r. DB: db, }) } + +func loadStreamEsConfig(prefix string) config.EsConfig { + cfg := viper.New() + cfg.SetEnvPrefix(fmt.Sprintf("%s_stream_es", prefix)) + + cfg.SetDefault("url", "localhost:6379") + cfg.SetDefault("pass", "") + cfg.SetDefault("db", "0") + cfg.SetDefault("consumer", fmt.Sprintf("%s-es-consumer", prefix)) + + cfg.AllowEmptyEnv(true) + cfg.AutomaticEnv() + var esC config.EsConfig + cfg.Unmarshal(&esC) + return esC +} + +func loadSinkerEsConfig(prefix string) config.EsConfig { + cfg := viper.New() + cfg.SetEnvPrefix(fmt.Sprintf("%s_sinker_es", prefix)) + + cfg.SetDefault("url", "localhost:6378") + cfg.SetDefault("pass", "") + cfg.SetDefault("db", "1") + + cfg.AllowEmptyEnv(true) + cfg.AutomaticEnv() + var esC config.EsConfig + cfg.Unmarshal(&esC) + return esC +} diff --git a/cmd/migrate/main.go b/cmd/migrate/main.go index 02b0e2a98..6dfa7be49 100644 --- a/cmd/migrate/main.go +++ b/cmd/migrate/main.go @@ -54,7 +54,7 @@ func main() { usersDbCfg := config.LoadPostgresConfig(fmt.Sprintf("%s_%s", envPrefix, postgres.DbUsers), postgres.DbUsers) thingsDbCfg := config.LoadPostgresConfig(fmt.Sprintf("%s_%s", envPrefix, postgres.DbThings), postgres.DbThings) sinksDbCfg := config.LoadPostgresConfig(fmt.Sprintf("%s_%s", envPrefix, postgres.DBSinks), postgres.DBSinks) - sinksEncryptionKey := config.LoadEncryptionKey(fmt.Sprintf("%s_%s", envPrefix, postgres.DBSinks)) + //sinksEncryptionKey := config.LoadEncryptionKey(fmt.Sprintf("%s_%s", envPrefix, postgres.DBSinks)) dbs := make(map[string]postgres.Database) @@ -67,8 +67,11 @@ func main() { svc := migrate.New( log, dbs, - migration.NewM1KetoPolicies(log, dbs), - migration.NewM2SinksCredentials(log, sinksDB, sinksEncryptionKey), + // When generating a new migration image + // Comment the previous and keep only the necessary steps to migrate up/down + //migration.NewM1KetoPolicies(log, dbs), + //migration.NewM2SinksCredentials(log, sinksDB, sinksEncryptionKey), + migration.NewM3SinksOpenTelemetry(log, sinksDB), ) rootCmd := &cobra.Command{ diff --git a/fleet/agent_group_service.go b/fleet/agent_group_service.go 
index be687631a..ef8e8b1f7 100644 --- a/fleet/agent_group_service.go +++ b/fleet/agent_group_service.go @@ -131,6 +131,8 @@ func (svc fleetService) EditAgentGroup(ctx context.Context, token string, group if group.Tags == nil { group.Tags = currentAgentGroup.Tags + } else if group.Tags != nil && len(*group.Tags) == 0 { + return AgentGroup{}, errors.Wrap(errors.ErrMalformedEntity, errors.New("group tags can not be empty")) } ag, err := svc.agentGroupRepository.Update(ctx, ownerID, group) @@ -300,7 +302,7 @@ func (svc fleetService) ValidateAgentGroup(ctx context.Context, token string, ag } ag.MFOwnerID = mfOwnerID - res, err := svc.agentRepo.RetrieveMatchingAgents(ctx, mfOwnerID, ag.Tags) + res, err := svc.agentRepo.RetrieveMatchingAgents(ctx, mfOwnerID, *ag.Tags) if err != nil { return AgentGroup{}, err } diff --git a/fleet/agent_group_service_test.go b/fleet/agent_group_service_test.go index 9c5065f03..2a48baaa3 100644 --- a/fleet/agent_group_service_test.go +++ b/fleet/agent_group_service_test.go @@ -56,6 +56,7 @@ var ( } invalidName = strings.Repeat("m", maxNameSize+1) metadata = map[string]interface{}{"meta": "data"} + emptyTags = types.Tags{} ) func generateChannels() map[string]things.Channel { @@ -125,13 +126,11 @@ func TestCreateAgentGroup(t *testing.T) { MFOwnerID: ownerID.String(), Name: nameID, Description: &description, - Tags: make(map[string]string), - Created: time.Time{}, - } - - validAgent.Tags = map[string]string{ - "region": "eu", - "node_type": "dns", + Tags: &types.Tags{ + "region": "eu", + "node_type": "dns", + }, + Created: time.Time{}, } cases := map[string]struct { @@ -403,26 +402,26 @@ func TestUpdateAgentGroup(t *testing.T) { token: token, err: nil, }, - "update existing agent group with tags empty": { + "update existing agent group with tags omitted": { group: fleet.AgentGroup{ ID: ag.ID, Name: ag.Name, MFOwnerID: ag.MFOwnerID, - Tags: map[string]string{}, }, expectedGroup: fleet.AgentGroup{ Name: ag.Name, - Tags: map[string]string{}, + Tags: ag.Tags, Description: ag.Description, }, token: token, err: nil, }, - "update existing agent group with tags omitted": { + "update existing agent group with tags empty": { group: fleet.AgentGroup{ ID: ag.ID, Name: ag.Name, MFOwnerID: ag.MFOwnerID, + Tags: &emptyTags, }, expectedGroup: fleet.AgentGroup{ Name: ag.Name, @@ -430,7 +429,7 @@ func TestUpdateAgentGroup(t *testing.T) { Description: ag.Description, }, token: token, - err: nil, + err: errors.ErrMalformedEntity, }, } @@ -440,6 +439,7 @@ func TestUpdateAgentGroup(t *testing.T) { if err == nil { assert.Equal(t, *tc.expectedGroup.Description, *agentGroupTest.Description, fmt.Sprintf("%s: expected %s got %s", desc, *tc.expectedGroup.Description, *agentGroupTest.Description)) assert.Equal(t, tc.expectedGroup.Name, agentGroupTest.Name, fmt.Sprintf("%s: expected %s got %s", desc, tc.expectedGroup.Name, agentGroupTest.Name)) + assert.Equal(t, tc.expectedGroup.Tags, agentGroupTest.Tags, fmt.Sprintf("%s: expected %p got %p", desc, tc.expectedGroup.Tags, agentGroupTest.Tags)) } assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %d got %d", desc, tc.err, err)) }) @@ -501,7 +501,7 @@ func createAgentGroup(t *testing.T, name string, svc fleet.AgentGroupService) (f } agCopy.Name = validName agCopy.Description = &description - agCopy.Tags = map[string]string{ + agCopy.Tags = &types.Tags{ "tag": "test", } ag, err := svc.CreateAgentGroup(context.Background(), token, agCopy) @@ -545,12 +545,10 @@ func TestValidateAgentGroup(t *testing.T) { MFOwnerID: ownerID.String(), 
Name: nameID, Description: &description, - Tags: make(map[string]string), - } - - validAgent.Tags = map[string]string{ - "region": "eu", - "node_type": "dns", + Tags: &types.Tags{ + "region": "eu", + "node_type": "dns", + }, } cases := map[string]struct { diff --git a/fleet/agent_groups.go b/fleet/agent_groups.go index 2a223f8a6..f6d08e52a 100644 --- a/fleet/agent_groups.go +++ b/fleet/agent_groups.go @@ -17,7 +17,7 @@ type AgentGroup struct { Name types.Identifier Description *string MFChannelID string - Tags types.Tags + Tags *types.Tags Created time.Time MatchingAgents types.Metadata } diff --git a/fleet/agent_service.go b/fleet/agent_service.go index fca4ed130..05537c035 100644 --- a/fleet/agent_service.go +++ b/fleet/agent_service.go @@ -194,6 +194,9 @@ func (svc fleetService) EditAgent(ctx context.Context, token string, agent Agent if newName := agent.Name.String(); newName == "" { agent.Name = currentAgent.Name } + if agent.OrbTags == nil { + agent.OrbTags = currentAgent.OrbTags + } err = svc.agentRepo.UpdateAgentByID(ctx, ownerID, agent) if err != nil { diff --git a/fleet/agent_service_test.go b/fleet/agent_service_test.go index 2a8ac8cf8..883fc749c 100644 --- a/fleet/agent_service_test.go +++ b/fleet/agent_service_test.go @@ -244,18 +244,27 @@ func TestUpdateAgent(t *testing.T) { require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) ag, err := fleetService.CreateAgent(context.Background(), "token", fleet.Agent{ - Name: validAgentName, - AgentTags: map[string]string{"test": "true"}, + Name: validAgentName, + OrbTags: &types.Tags{"test": "true"}, }) require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) validName, err := types.NewIdentifier("group") require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - _, _ = fleetService.CreateAgentGroup(context.Background(), "token", fleet.AgentGroup{ Name: validName, - Tags: map[string]string{"test": "true"}, + Tags: &types.Tags{ + "test": "true", + }, + }) + + validAgentNameTestAttributeTags, err := types.NewIdentifier("tagsTest") + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + agentTestAttributeTags, err := fleetService.CreateAgent(context.Background(), "token", fleet.Agent{ + Name: validAgentNameTestAttributeTags, + OrbTags: &types.Tags{"test": "true"}, }) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) cases := map[string]struct { agent fleet.Agent @@ -279,7 +288,7 @@ func TestUpdateAgent(t *testing.T) { token: token, err: fleet.ErrNotFound, }, - "update existing agent without name": { + "update existing agent with omitted name": { agent: fleet.Agent{ MFThingID: ag.MFThingID, MFOwnerID: ag.MFOwnerID, @@ -291,6 +300,31 @@ func TestUpdateAgent(t *testing.T) { token: token, err: nil, }, + "update existing agent with empty tags": { + agent: fleet.Agent{ + MFThingID: ag.MFThingID, + MFOwnerID: ag.MFOwnerID, + OrbTags: &emptyTags, + }, + expectedAgent: fleet.Agent{ + Name: ag.Name, + OrbTags: &emptyTags, + }, + token: token, + err: nil, + }, + "update existing agent with omitted tags": { + agent: fleet.Agent{ + MFThingID: agentTestAttributeTags.MFThingID, + MFOwnerID: agentTestAttributeTags.MFOwnerID, + }, + expectedAgent: fleet.Agent{ + Name: agentTestAttributeTags.Name, + OrbTags: agentTestAttributeTags.OrbTags, + }, + token: token, + err: nil, + }, } for desc, tc := range cases { @@ -298,6 +332,7 @@ func TestUpdateAgent(t *testing.T) { agentTest, err := fleetService.EditAgent(context.Background(), tc.token, tc.agent) if err == nil { assert.Equal(t, tc.expectedAgent.Name, agentTest.Name, 
fmt.Sprintf("%s: expected %s got %s", desc, tc.expectedAgent.Name, agentTest.Name)) + assert.Equal(t, tc.expectedAgent.OrbTags, agentTest.OrbTags, fmt.Sprintf("%s: expected %p got %p", desc, tc.expectedAgent.OrbTags, agentTest.OrbTags)) } assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %d got %d", desc, tc.err, err)) }) @@ -314,12 +349,13 @@ func TestValidateAgent(t *testing.T) { nameID, err := types.NewIdentifier("eu-agents") require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + emptyTags := types.Tags{} validAgent := fleet.Agent{ MFOwnerID: ownerID.String(), Name: nameID, - OrbTags: make(map[string]string), + OrbTags: &emptyTags, } - validAgent.OrbTags = map[string]string{ + validAgent.OrbTags = &types.Tags{ "region": "eu", "node_type": "dns", } @@ -360,13 +396,14 @@ func TestCreateAgent(t *testing.T) { conflictCase, err := createAgent(t, "agent", fleetService) + emptyTags := types.Tags{} validAgent := fleet.Agent{ MFOwnerID: ownerID.String(), Name: nameID, - OrbTags: make(map[string]string), + OrbTags: &emptyTags, Created: time.Time{}, } - validAgent.OrbTags = map[string]string{ + validAgent.OrbTags = &types.Tags{ "region": "eu", "node_type": "dns", } @@ -535,7 +572,7 @@ func TestViewAgentInfoByChannelIDInternal(t *testing.T) { t.Run(desc, func(t *testing.T) { agent, err := fleetService.ViewAgentInfoByChannelIDInternal(context.Background(), tc.channelID) assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s", desc, tc.err, err)) - assert.Equal(t, tc.agent, agent, fmt.Sprintf("%s: expected %s got %s", desc, tc.agent, agent)) + assert.Equal(t, tc.agent, agent, fmt.Sprintf("%s: expected %v got %v", desc, tc.agent, agent)) }) } } diff --git a/fleet/agents.go b/fleet/agents.go index 9b7f8d602..93a62ef52 100644 --- a/fleet/agents.go +++ b/fleet/agents.go @@ -54,7 +54,7 @@ type Agent struct { MFKeyID string MFChannelID string Created time.Time - OrbTags types.Tags + OrbTags *types.Tags AgentTags types.Tags AgentMetadata types.Metadata State State diff --git a/fleet/api/grpc/endpoint.go b/fleet/api/grpc/endpoint.go index a6001d0fd..67315d580 100644 --- a/fleet/api/grpc/endpoint.go +++ b/fleet/api/grpc/endpoint.go @@ -83,7 +83,7 @@ func retrieveAgentInfoByChannelIDEndpoint(svc fleet.Service) endpoint.Endpoint { ownerID: agent.MFOwnerID, agentName: agent.Name.String(), agentTags: agent.AgentTags, - orbTags: agent.OrbTags, + orbTags: *agent.OrbTags, agentGroupIDs: groupIDs, } return res, nil diff --git a/fleet/api/http/endpoint.go b/fleet/api/http/endpoint.go index 50beae9b5..0aee6e0cb 100644 --- a/fleet/api/http/endpoint.go +++ b/fleet/api/http/endpoint.go @@ -31,7 +31,7 @@ func addAgentGroupEndpoint(svc fleet.Service) endpoint.Endpoint { group := fleet.AgentGroup{ Name: nID, Description: &req.Description, - Tags: req.Tags, + Tags: &req.Tags, } saved, err := svc.CreateAgentGroup(c, req.token, group) if err != nil { @@ -42,7 +42,7 @@ func addAgentGroupEndpoint(svc fleet.Service) endpoint.Endpoint { ID: saved.ID, Name: saved.Name.String(), Description: *saved.Description, - Tags: saved.Tags, + Tags: *saved.Tags, MatchingAgents: saved.MatchingAgents, created: true, } @@ -65,7 +65,7 @@ func viewAgentGroupEndpoint(svc fleet.Service) endpoint.Endpoint { ID: agentGroup.ID, Name: agentGroup.Name.String(), Description: *agentGroup.Description, - Tags: agentGroup.Tags, + Tags: *agentGroup.Tags, TsCreated: agentGroup.Created, MatchingAgents: agentGroup.MatchingAgents, } @@ -100,7 +100,7 @@ func listAgentGroupsEndpoint(svc fleet.Service) 
endpoint.Endpoint { ID: ag.ID, Name: ag.Name.String(), Description: *ag.Description, - Tags: ag.Tags, + Tags: *ag.Tags, TsCreated: ag.Created, MatchingAgents: ag.MatchingAgents, } @@ -118,17 +118,22 @@ func editAgentGroupEndpoint(svc fleet.Service) endpoint.Endpoint { } var validName types.Identifier - if req.Name != "" { - validName, err = types.NewIdentifier(req.Name) + if req.Name != nil { + validName, err = types.NewIdentifier(*req.Name) if err != nil { return agentGroupRes{}, errors.Wrap(errors.ErrMalformedEntity, err) } } + var groupTags *types.Tags + if req.Tags != nil { + groupTags = req.Tags + } + ag := fleet.AgentGroup{ ID: req.id, Name: validName, Description: req.Description, - Tags: req.Tags, + Tags: groupTags, } data, err := svc.EditAgentGroup(ctx, req.token, ag) @@ -140,7 +145,7 @@ func editAgentGroupEndpoint(svc fleet.Service) endpoint.Endpoint { ID: data.ID, Name: data.Name.String(), Description: *data.Description, - Tags: data.Tags, + Tags: *data.Tags, TsCreated: data.Created, MatchingAgents: data.MatchingAgents, } @@ -178,7 +183,7 @@ func addAgentEndpoint(svc fleet.Service) endpoint.Endpoint { agent := fleet.Agent{ Name: nID, - OrbTags: req.OrbTags, + OrbTags: &req.OrbTags, AgentTags: req.AgentTags, } saved, err := svc.CreateAgent(c, req.token, agent) @@ -191,7 +196,7 @@ func addAgentEndpoint(svc fleet.Service) endpoint.Endpoint { ID: saved.MFThingID, State: saved.State.String(), Key: saved.MFKeyID, - OrbTags: saved.OrbTags, + OrbTags: *saved.OrbTags, AgentTags: saved.AgentTags, AgentMetadata: saved.AgentMetadata, LastHBData: saved.LastHBData, @@ -222,7 +227,7 @@ func viewAgentEndpoint(svc fleet.Service) endpoint.Endpoint { Name: ag.Name.String(), ChannelID: ag.MFChannelID, AgentTags: ag.AgentTags, - OrbTags: ag.OrbTags, + OrbTags: *ag.OrbTags, TsCreated: ag.Created, AgentMetadata: ag.AgentMetadata, State: ag.State.String(), @@ -303,7 +308,7 @@ func listAgentsEndpoint(svc fleet.Service) endpoint.Endpoint { Name: ag.Name.String(), ChannelID: ag.MFChannelID, AgentTags: ag.AgentTags, - OrbTags: ag.OrbTags, + OrbTags: *ag.OrbTags, TsCreated: ag.Created, State: ag.State.String(), TsLastHB: ag.LastHB, @@ -330,7 +335,7 @@ func validateAgentGroupEndpoint(svc fleet.Service) endpoint.Endpoint { group := fleet.AgentGroup{ Name: nID, - Tags: req.Tags, + Tags: &req.Tags, } validated, err := svc.ValidateAgentGroup(c, req.token, group) if err != nil { @@ -339,7 +344,7 @@ func validateAgentGroupEndpoint(svc fleet.Service) endpoint.Endpoint { res := validateAgentGroupRes{ Name: validated.Name.String(), - Tags: validated.Tags, + Tags: *validated.Tags, MatchingAgents: validated.MatchingAgents, } @@ -356,16 +361,21 @@ func editAgentEndpoint(svc fleet.Service) endpoint.Endpoint { } var validName types.Identifier - if req.Name != "" { - validName, err = types.NewIdentifier(req.Name) + if req.Name != nil { + validName, err = types.NewIdentifier(*req.Name) if err != nil { return nil, errors.Wrap(errors.ErrMalformedEntity, err) } } + var agentOrbTags *types.Tags + if req.Tags != nil { + agentOrbTags = req.Tags + } + agent := fleet.Agent{ Name: validName, MFThingID: req.id, - OrbTags: req.Tags, + OrbTags: agentOrbTags, } ag, err := svc.EditAgent(ctx, req.token, agent) @@ -378,7 +388,7 @@ func editAgentEndpoint(svc fleet.Service) endpoint.Endpoint { Name: ag.Name.String(), ChannelID: ag.MFChannelID, AgentTags: ag.AgentTags, - OrbTags: ag.OrbTags, + OrbTags: *ag.OrbTags, TsCreated: ag.Created, AgentMetadata: ag.AgentMetadata, State: ag.State.String(), @@ -405,7 +415,7 @@ func 
validateAgentEndpoint(svc fleet.Service) endpoint.Endpoint { agent := fleet.Agent{ Name: nID, - OrbTags: req.OrbTags, + OrbTags: &req.OrbTags, } validated, err := svc.ValidateAgent(c, req.token, agent) if err != nil { @@ -414,7 +424,7 @@ func validateAgentEndpoint(svc fleet.Service) endpoint.Endpoint { res := validateAgentRes{ Name: validated.Name.String(), - OrbTags: validated.OrbTags, + OrbTags: *validated.OrbTags, } return res, nil } diff --git a/fleet/api/http/endpoint_test.go b/fleet/api/http/endpoint_test.go index 5df15f29f..14c9991cc 100644 --- a/fleet/api/http/endpoint_test.go +++ b/fleet/api/http/endpoint_test.go @@ -76,6 +76,7 @@ var ( metadata = map[string]interface{}{"type": "orb_agent"} tags = types.Tags{"region": "us", "node_type": "dns"} invalidName = strings.Repeat("m", maxNameSize+1) + emptyName = "" ) type testRequest struct { @@ -301,7 +302,7 @@ func TestListAgentGroup(t *testing.T) { ID: ag.ID, Name: ag.Name.String(), Description: *ag.Description, - Tags: ag.Tags, + Tags: *ag.Tags, TsCreated: ag.Created, MatchingAgents: nil, }) @@ -469,6 +470,8 @@ func TestUpdateAgentGroup(t *testing.T) { ag, err := createAgentGroup(t, "ue-agent-group", &cli) require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + groupName := ag.Name.String() + cases := map[string]struct { req string id string @@ -478,8 +481,8 @@ func TestUpdateAgentGroup(t *testing.T) { }{ "update existing agent group": { req: toJSON(updateAgentGroupReq{ - Name: ag.Name.String(), - Description: *ag.Description, + Name: &groupName, + Description: ag.Description, Tags: ag.Tags, }), id: ag.ID, @@ -496,8 +499,8 @@ func TestUpdateAgentGroup(t *testing.T) { }, "update agent group with a invalid id": { req: toJSON(updateAgentGroupReq{ - Name: ag.Name.String(), - Description: *ag.Description, + Name: &groupName, + Description: ag.Description, Tags: ag.Tags, }), id: "invalid", @@ -507,8 +510,8 @@ func TestUpdateAgentGroup(t *testing.T) { }, "update non-existing agent group": { req: toJSON(updateAgentGroupReq{ - Name: ag.Name.String(), - Description: *ag.Description, + Name: &groupName, + Description: ag.Description, Tags: ag.Tags, }), id: wrongID, @@ -518,8 +521,8 @@ func TestUpdateAgentGroup(t *testing.T) { }, "update agent group with invalid user token": { req: toJSON(updateAgentGroupReq{ - Name: ag.Name.String(), - Description: *ag.Description, + Name: &groupName, + Description: ag.Description, Tags: ag.Tags, }), id: ag.ID, @@ -529,8 +532,8 @@ func TestUpdateAgentGroup(t *testing.T) { }, "update agent group with empty user token": { req: toJSON(updateAgentGroupReq{ - Name: ag.Name.String(), - Description: *ag.Description, + Name: &groupName, + Description: ag.Description, Tags: ag.Tags, }), id: ag.ID, @@ -540,8 +543,8 @@ func TestUpdateAgentGroup(t *testing.T) { }, "update agent group with invalid content type": { req: toJSON(updateAgentGroupReq{ - Name: ag.Name.String(), - Description: *ag.Description, + Name: &groupName, + Description: ag.Description, Tags: ag.Tags, }), id: ag.ID, @@ -551,8 +554,8 @@ func TestUpdateAgentGroup(t *testing.T) { }, "update agent group without content type": { req: toJSON(updateAgentGroupReq{ - Name: ag.Name.String(), - Description: *ag.Description, + Name: &groupName, + Description: ag.Description, Tags: ag.Tags, }), id: ag.ID, @@ -583,8 +586,8 @@ func TestUpdateAgentGroup(t *testing.T) { }, "add a agent group with invalid name": { req: toJSON(updateAgentGroupReq{ - Name: "g", - Description: *ag.Description, + Name: &invalidName, + Description: ag.Description, Tags: ag.Tags, }), id: 
ag.ID, @@ -594,9 +597,19 @@ func TestUpdateAgentGroup(t *testing.T) { }, "update existing agent group with empty tags": { req: toJSON(updateAgentGroupReq{ - Name: ag.Name.String(), - Description: *ag.Description, - Tags: map[string]string{}, + Name: &groupName, + Description: ag.Description, + Tags: &types.Tags{}, + }), + id: ag.ID, + contentType: contentType, + auth: token, + status: http.StatusBadRequest, + }, + "update existing agent group with omitted tags": { + req: toJSON(updateAgentGroupReq{ + Name: &groupName, + Description: ag.Description, }), id: ag.ID, contentType: contentType, @@ -605,7 +618,7 @@ func TestUpdateAgentGroup(t *testing.T) { }, "update existing agent group with omitted name": { req: toJSON(updateAgentGroupReq{ - Description: *ag.Description, + Description: ag.Description, Tags: ag.Tags, }), id: ag.ID, @@ -613,15 +626,16 @@ func TestUpdateAgentGroup(t *testing.T) { auth: token, status: http.StatusOK, }, - "update existing agent group with omitted tags": { + "update existing agent group with empty name": { req: toJSON(updateAgentGroupReq{ - Name: ag.Name.String(), - Description: *ag.Description, + Name: &emptyName, + Description: ag.Description, + Tags: ag.Tags, }), id: ag.ID, contentType: contentType, auth: token, - status: http.StatusOK, + status: http.StatusBadRequest, }, } @@ -909,7 +923,7 @@ func TestListAgent(t *testing.T) { Name: ag.Name.String(), ChannelID: ag.MFChannelID, AgentTags: ag.AgentTags, - OrbTags: ag.OrbTags, + OrbTags: *ag.OrbTags, TsCreated: ag.Created, AgentMetadata: ag.AgentMetadata, State: ag.State.String(), @@ -1080,6 +1094,8 @@ func TestUpdateAgent(t *testing.T) { ag, err := createAgent(t, "my-agent1", &cli) require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + agentName := ag.Name.String() + cases := map[string]struct { req string id string @@ -1089,7 +1105,7 @@ func TestUpdateAgent(t *testing.T) { }{ "update existing agent": { req: toJSON(updateAgentReq{ - Name: ag.Name.String(), + Name: &agentName, Tags: ag.OrbTags, }), id: ag.MFThingID, @@ -1106,7 +1122,7 @@ func TestUpdateAgent(t *testing.T) { }, "update agent with a invalid id": { req: toJSON(updateAgentReq{ - Name: ag.Name.String(), + Name: &agentName, Tags: ag.OrbTags, }), id: "invalid", @@ -1116,7 +1132,7 @@ func TestUpdateAgent(t *testing.T) { }, "update non-existing agent": { req: toJSON(updateAgentReq{ - Name: ag.Name.String(), + Name: &agentName, Tags: ag.OrbTags, }), id: wrongID, @@ -1126,7 +1142,7 @@ func TestUpdateAgent(t *testing.T) { }, "update agent with invalid user token": { req: toJSON(updateAgentReq{ - Name: ag.Name.String(), + Name: &agentName, Tags: ag.OrbTags, }), id: ag.MFThingID, @@ -1136,7 +1152,7 @@ func TestUpdateAgent(t *testing.T) { }, "update agent with empty user token": { req: toJSON(updateAgentReq{ - Name: ag.Name.String(), + Name: &agentName, Tags: ag.OrbTags, }), id: ag.MFThingID, @@ -1146,7 +1162,7 @@ func TestUpdateAgent(t *testing.T) { }, "update agent with invalid content type": { req: toJSON(updateAgentReq{ - Name: ag.Name.String(), + Name: &agentName, Tags: ag.OrbTags, }), id: ag.MFThingID, @@ -1156,7 +1172,7 @@ func TestUpdateAgent(t *testing.T) { }, "update agent without content type": { req: toJSON(updateAgentReq{ - Name: ag.Name.String(), + Name: &agentName, Tags: ag.OrbTags, }), id: ag.MFThingID, @@ -1187,7 +1203,7 @@ func TestUpdateAgent(t *testing.T) { }, "update existing agent with invalid name": { req: toJSON(updateAgentReq{ - Name: "a", + Name: &invalidName, Tags: ag.OrbTags, }), id: ag.MFThingID, @@ -1195,13 +1211,42 @@ func 
TestUpdateAgent(t *testing.T) { auth: token, status: http.StatusBadRequest, }, - "update existing agent without name": { + "update existing agent with omitted name": { + req: toJSON(updateAgentReq{ + Tags: ag.OrbTags, + }), + id: ag.MFThingID, + contentType: contentType, + auth: token, + status: http.StatusOK, + }, + "update existing agent with empty name": { req: toJSON(updateAgentReq{ + Name: &emptyName, Tags: ag.OrbTags, }), id: ag.MFThingID, contentType: contentType, auth: token, + status: http.StatusBadRequest, + }, + "update existing agent with empty tags": { + req: toJSON(updateAgentReq{ + Name: &agentName, + Tags: &types.Tags{}, + }), + id: ag.MFThingID, + contentType: contentType, + auth: token, + status: http.StatusOK, + }, + "update existing agent with omitted tags": { + req: toJSON(updateAgentReq{ + Name: &agentName, + }), + id: ag.MFThingID, + contentType: contentType, + auth: token, status: http.StatusOK, }, } @@ -1633,7 +1678,7 @@ func createAgentGroup(t *testing.T, name string, cli *clientServer) (fleet.Agent validName, err := types.NewIdentifier(name) require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) agCopy.Name = validName - agCopy.Tags = tags + agCopy.Tags = &tags description := "description example" agCopy.Description = &description @@ -1650,7 +1695,7 @@ func createAgent(t *testing.T, name string, cli *clientServer) (fleet.Agent, err validName, err := types.NewIdentifier(name) require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) aCopy.Name = validName - aCopy.OrbTags = tags + aCopy.OrbTags = &tags a, err := cli.service.CreateAgent(context.Background(), token, aCopy) if err != nil { return fleet.Agent{}, err @@ -1712,13 +1757,13 @@ type agentsPageRes struct { type updateAgentGroupReq struct { token string - Name string `json:"name,omitempty"` - Description string `json:"description,omitempty"` - Tags types.Tags `json:"tags"` + Name *string `json:"name,omitempty"` + Description *string `json:"description,omitempty"` + Tags *types.Tags `json:"tags"` } type updateAgentReq struct { token string - Name string `json:"name,omitempty"` - Tags types.Tags `json:"orb_tags"` + Name *string `json:"name,omitempty"` + Tags *types.Tags `json:"orb_tags,omitempty"` } diff --git a/fleet/api/http/requests.go b/fleet/api/http/requests.go index 0370da62a..eea30b68c 100644 --- a/fleet/api/http/requests.go +++ b/fleet/api/http/requests.go @@ -53,9 +53,9 @@ func (req addAgentGroupReq) validate() error { type updateAgentGroupReq struct { id string token string - Name string `json:"name,omitempty"` - Description *string `json:"description,omitempty"` - Tags types.Tags `json:"tags"` + Name *string `json:"name,omitempty"` + Description *string `json:"description,omitempty"` + Tags *types.Tags `json:"tags"` } func (req updateAgentGroupReq) validate() error { @@ -63,7 +63,16 @@ func (req updateAgentGroupReq) validate() error { if req.token == "" { return errors.ErrUnauthorizedAccess } - if req.Name == "" && req.Tags == nil && req.Description == nil { + if req.Name == nil && req.Tags == nil && req.Description == nil { + return errors.ErrMalformedEntity + } + if req.Tags != nil { + if len(*req.Tags) == 0 { + return errors.ErrMalformedEntity + } + } + + if req.Name != nil && *req.Name == "" { return errors.ErrMalformedEntity } @@ -97,8 +106,8 @@ func (req addAgentReq) validate() error { type updateAgentReq struct { id string token string - Name string `json:"name,omitempty"` - Tags types.Tags `json:"orb_tags,omitempty"` + Name *string `json:"name,omitempty"` + Tags *types.Tags 
`json:"orb_tags,omitempty"` } func (req updateAgentReq) validate() error { @@ -107,7 +116,11 @@ func (req updateAgentReq) validate() error { return errors.ErrUnauthorizedAccess } - if req.Name == "" && req.Tags == nil { + if req.Name == nil && req.Tags == nil { + return errors.ErrMalformedEntity + } + + if req.Name != nil && *req.Name == "" { return errors.ErrMalformedEntity } diff --git a/fleet/comms_test.go b/fleet/comms_test.go index c9748bc3a..794fb7c4d 100644 --- a/fleet/comms_test.go +++ b/fleet/comms_test.go @@ -226,7 +226,9 @@ func TestNotifyAgentAllDatasets(t *testing.T) { group, err := fleetSVC.CreateAgentGroup(context.Background(), "token", fleet.AgentGroup{ Name: validGroupName, - Tags: map[string]string{"test": "true"}, + Tags: &types.Tags{ + "test": "true", + }, }) require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) @@ -365,7 +367,9 @@ func TestNotifyAgentGroupMembership(t *testing.T) { _, err = fleetSVC.CreateAgentGroup(context.Background(), "token", fleet.AgentGroup{ Name: validGroupName, - Tags: map[string]string{"test": "true"}, + Tags: &types.Tags{ + "test": "true", + }, }) require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) @@ -497,7 +501,9 @@ func TestNotifyAgentNewGroupMembership(t *testing.T) { _, err = fleetSVC.CreateAgentGroup(context.Background(), "token", fleet.AgentGroup{ Name: validGroupName, - Tags: map[string]string{"test": "true"}, + Tags: &types.Tags{ + "test": "true", + }, }) require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) diff --git a/fleet/postgres/agent_groups.go b/fleet/postgres/agent_groups.go index d1963c810..1dc8c09f6 100644 --- a/fleet/postgres/agent_groups.go +++ b/fleet/postgres/agent_groups.go @@ -392,18 +392,28 @@ func toDBAgentGroup(group fleet.AgentGroup) (dbAgentGroup, error) { description = *group.Description } + groupTags := make(db.Tags) + if group.Tags != nil { + groupTags = db.Tags(*group.Tags) + } + return dbAgentGroup{ ID: group.ID, Name: group.Name, Description: description, MFOwnerID: group.MFOwnerID, MFChannelID: group.MFChannelID, - Tags: db.Tags(group.Tags), + Tags: groupTags, }, nil } func toAgentGroup(dba dbAgentGroup) (fleet.AgentGroup, error) { + groupTags := make(types.Tags) + if len(dba.Tags) != 0 { + groupTags.Merge(dba.Tags) + } + return fleet.AgentGroup{ ID: dba.ID, Name: dba.Name, @@ -411,7 +421,7 @@ func toAgentGroup(dba dbAgentGroup) (fleet.AgentGroup, error) { MFOwnerID: dba.MFOwnerID, MFChannelID: dba.MFChannelID, Created: dba.Created, - Tags: types.Tags(dba.Tags), + Tags: &groupTags, MatchingAgents: types.Metadata(dba.MatchingAgents), }, nil diff --git a/fleet/postgres/agent_groups_test.go b/fleet/postgres/agent_groups_test.go index c99814c05..2ffb84eda 100644 --- a/fleet/postgres/agent_groups_test.go +++ b/fleet/postgres/agent_groups_test.go @@ -45,7 +45,7 @@ func TestAgentGroupSave(t *testing.T) { Name: nameID, MFOwnerID: oID.String(), MFChannelID: chID.String(), - Tags: types.Tags{"testkey": "testvalue"}, + Tags: &types.Tags{"testkey": "testvalue"}, } groupCopy := group @@ -102,7 +102,7 @@ func TestAgentGroupRetrieve(t *testing.T) { Description: &description, MFOwnerID: oID.String(), MFChannelID: chID.String(), - Tags: types.Tags{"testkey": "testvalue"}, + Tags: &types.Tags{"testkey": "testvalue"}, } id, err := agentGroupRepo.Save(context.Background(), group) @@ -144,7 +144,7 @@ func TestAgentGroupRetrieve(t *testing.T) { assert.Equal(t, nameID, ag.Name, fmt.Sprintf("%s: expected %s got %s\n", desc, nameID, ag.Name)) } if len(tc.tags) > 0 { - assert.Equal(t, tc.tags, ag.Tags) + 
assert.Equal(t, tc.tags, *ag.Tags) } assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", desc, tc.err, err)) }) @@ -175,7 +175,7 @@ func TestMultiAgentGroupRetrieval(t *testing.T) { Description: &description, MFOwnerID: oID.String(), MFChannelID: chID.String(), - Tags: types.Tags{"testkey": "testvalue"}, + Tags: &types.Tags{"testkey": "testvalue"}, } ag, err := agentGroupRepo.Save(context.Background(), group) @@ -297,7 +297,7 @@ func TestAgentGroupUpdate(t *testing.T) { Name: nameID, MFOwnerID: oID.String(), MFChannelID: chID.String(), - Tags: types.Tags{"testkey": "testvalue"}, + Tags: &types.Tags{"testkey": "testvalue"}, } groupID, err := groupRepo.Save(context.Background(), group) @@ -311,7 +311,7 @@ func TestAgentGroupUpdate(t *testing.T) { Name: nameConflict, MFOwnerID: oID.String(), MFChannelID: chID.String(), - Tags: types.Tags{"testkey": "testvalue"}, + Tags: &types.Tags{"testkey": "testvalue"}, } _, err = groupRepo.Save(context.Background(), groupConflictName) @@ -359,7 +359,7 @@ func TestAgentGroupUpdate(t *testing.T) { Name: nameConflict, MFOwnerID: oID.String(), MFChannelID: chID.String(), - Tags: types.Tags{"testkey": "testvalue"}, + Tags: &types.Tags{"testkey": "testvalue"}, }, err: errors.ErrConflict, }, @@ -393,7 +393,7 @@ func TestAgentGroupDelete(t *testing.T) { Name: nameID, MFOwnerID: oID.String(), MFChannelID: chID.String(), - Tags: types.Tags{"testkey": "testvalue"}, + Tags: &types.Tags{"testkey": "testvalue"}, } groupID, err := groupRepo.Save(context.Background(), group) @@ -454,7 +454,7 @@ func TestAgentGroupRetrieveAllByAgent(t *testing.T) { MFThingID: thID.String(), MFOwnerID: oID.String(), MFChannelID: chID.String(), - OrbTags: types.Tags{"testkey": "testvalue"}, + OrbTags: &types.Tags{"testkey": "testvalue"}, AgentTags: types.Tags{"testkey": "testvalue"}, LastHBData: types.Metadata{"heartbeatdata": "testvalue"}, } @@ -473,7 +473,7 @@ func TestAgentGroupRetrieveAllByAgent(t *testing.T) { Description: &description, MFOwnerID: oID.String(), MFChannelID: chID.String(), - Tags: types.Tags{"testkey": "testvalue"}, + Tags: &types.Tags{"testkey": "testvalue"}, } ag, err := agentGroupRepo.Save(context.Background(), group) @@ -495,7 +495,7 @@ func TestAgentGroupRetrieveAllByAgent(t *testing.T) { MFThingID: wrongID.String(), MFOwnerID: oID.String(), MFChannelID: chID.String(), - OrbTags: types.Tags{"testkey": "testvalue"}, + OrbTags: &types.Tags{"testkey": "testvalue"}, AgentTags: types.Tags{"testkey": "testvalue"}, LastHBData: types.Metadata{"heartbeatdata": "testvalue"}, }, @@ -537,7 +537,7 @@ func TestRetrieveMatchingGroups(t *testing.T) { MFThingID: thID.String(), MFOwnerID: oID.String(), MFChannelID: chID.String(), - OrbTags: types.Tags{"testkey": "testvalue"}, + OrbTags: &types.Tags{"testkey": "testvalue"}, LastHBData: types.Metadata{"heartbeatdata": "testvalue"}, } @@ -555,7 +555,7 @@ func TestRetrieveMatchingGroups(t *testing.T) { Description: &description, MFOwnerID: oID.String(), MFChannelID: chID.String(), - Tags: types.Tags{"testkey": "testvalue"}, + Tags: &types.Tags{"testkey": "testvalue"}, } ag, err := agentGroupRepo.Save(context.Background(), group) diff --git a/fleet/postgres/agents.go b/fleet/postgres/agents.go index 234ad72c4..dcea67bd9 100644 --- a/fleet/postgres/agents.go +++ b/fleet/postgres/agents.go @@ -531,12 +531,17 @@ type dbMatchingAgent struct { func toDBAgent(agent fleet.Agent) (dbAgent, error) { + orbTags := types.Tags{} + if agent.OrbTags != nil { + orbTags = *agent.OrbTags + } + a := dbAgent{ Name: 
agent.Name, MFOwnerID: agent.MFOwnerID, MFThingID: agent.MFThingID, MFChannelID: agent.MFChannelID, - OrbTags: db.Tags(agent.OrbTags), + OrbTags: db.Tags(orbTags), AgentTags: db.Tags(agent.AgentTags), AgentMetadata: db.Metadata(agent.AgentMetadata), State: agent.State, @@ -555,13 +560,15 @@ func toDBAgent(agent fleet.Agent) (dbAgent, error) { func toAgent(dba dbAgent) (fleet.Agent, error) { + orbTags := types.Tags(dba.OrbTags) + agent := fleet.Agent{ Name: dba.Name, MFOwnerID: dba.MFOwnerID, MFThingID: dba.MFThingID, MFChannelID: dba.MFChannelID, Created: dba.Created, - OrbTags: types.Tags(dba.OrbTags), + OrbTags: &orbTags, AgentTags: types.Tags(dba.AgentTags), AgentMetadata: types.Metadata(dba.AgentMetadata), State: dba.State, diff --git a/fleet/postgres/agents_test.go b/fleet/postgres/agents_test.go index 71974a279..7625561a1 100644 --- a/fleet/postgres/agents_test.go +++ b/fleet/postgres/agents_test.go @@ -61,7 +61,7 @@ func TestAgentSave(t *testing.T) { MFThingID: thID.String(), MFOwnerID: oID.String(), MFChannelID: chID.String(), - OrbTags: types.Tags{"testkey": "testvalue"}, + OrbTags: &types.Tags{"testkey": "testvalue"}, AgentTags: types.Tags{"testkey": "testvalue"}, AgentMetadata: types.Metadata{"testkey": "testvalue"}, } @@ -154,7 +154,7 @@ func TestAgentRetrieve(t *testing.T) { MFThingID: thID.String(), MFOwnerID: oID.String(), MFChannelID: chID.String(), - OrbTags: types.Tags{"testkey": "testvalue"}, + OrbTags: &types.Tags{"testkey": "testvalue"}, AgentTags: types.Tags{"testkey": "testvalue"}, AgentMetadata: types.Metadata{"testkey": "testvalue"}, } @@ -188,7 +188,7 @@ func TestAgentRetrieve(t *testing.T) { assert.Equal(t, nameID, ag.Name, fmt.Sprintf("%s: expected %s got %s\n", desc, nameID, ag.Name)) } if len(tc.tags) > 0 { - assert.Equal(t, tc.tags, ag.OrbTags) + assert.Equal(t, tc.tags, *ag.OrbTags) assert.Equal(t, tc.tags, ag.AgentTags) } assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", desc, tc.err, err)) @@ -217,7 +217,7 @@ func TestAgentUpdateData(t *testing.T) { MFThingID: thID.String(), MFOwnerID: oID.String(), MFChannelID: chID.String(), - OrbTags: types.Tags{"testkey": "testvalue"}, + OrbTags: &types.Tags{"testkey": "testvalue"}, AgentTags: types.Tags{"testkey": "testvalue"}, AgentMetadata: types.Metadata{"testkey": "testvalue"}, } @@ -409,7 +409,7 @@ func TestMultiAgentRetrieval(t *testing.T) { require.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) th.AgentMetadata = metadata th.AgentTags = tags - th.OrbTags = subTags + th.OrbTags = &subTags err = agentRepo.Save(context.Background(), th) require.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) @@ -577,7 +577,7 @@ func TestAgentUpdate(t *testing.T) { MFThingID: thID.String(), MFOwnerID: oID.String(), MFChannelID: chID.String(), - OrbTags: types.Tags{"testkey": "testvalue"}, + OrbTags: &types.Tags{"testkey": "testvalue"}, AgentTags: types.Tags{"testkey": "testvalue"}, AgentMetadata: types.Metadata{"testkey": "testvalue"}, } @@ -587,7 +587,7 @@ func TestAgentUpdate(t *testing.T) { MFThingID: duplicatedThID.String(), MFOwnerID: oID.String(), MFChannelID: chID.String(), - OrbTags: types.Tags{"testkey": "testvalue"}, + OrbTags: &types.Tags{"testkey": "testvalue"}, AgentTags: types.Tags{"testkey": "testvalue"}, AgentMetadata: types.Metadata{"testkey": "testvalue"}, } @@ -607,7 +607,7 @@ func TestAgentUpdate(t *testing.T) { MFThingID: thID.String(), MFOwnerID: oID.String(), Name: updatedNameID, - OrbTags: types.Tags{"newkey": "newvalue"}, + OrbTags: &types.Tags{"newkey": 
"newvalue"}, }, err: nil, }, @@ -616,7 +616,7 @@ func TestAgentUpdate(t *testing.T) { MFThingID: oID.String(), MFOwnerID: oID.String(), Name: updatedNameID, - OrbTags: types.Tags{"newkey": "newvalue"}, + OrbTags: &types.Tags{"newkey": "newvalue"}, }, err: errors.ErrNotFound, }, @@ -625,7 +625,7 @@ func TestAgentUpdate(t *testing.T) { MFThingID: "", MFOwnerID: oID.String(), Name: updatedNameID, - OrbTags: types.Tags{"newkey": "newvalue"}, + OrbTags: &types.Tags{"newkey": "newvalue"}, }, err: errors.ErrMalformedEntity, }, @@ -634,7 +634,7 @@ func TestAgentUpdate(t *testing.T) { MFThingID: thID.String(), MFOwnerID: "", Name: updatedNameID, - OrbTags: types.Tags{"newkey": "newvalue"}, + OrbTags: &types.Tags{"newkey": "newvalue"}, }, err: errors.ErrMalformedEntity, }, @@ -694,7 +694,7 @@ func TestDeleteAgent(t *testing.T) { MFThingID: thID.String(), MFOwnerID: oID.String(), MFChannelID: chID.String(), - OrbTags: types.Tags{"testkey": "testvalue"}, + OrbTags: &types.Tags{"testkey": "testvalue"}, AgentTags: types.Tags{"testkey": "testvalue"}, AgentMetadata: types.Metadata{"testkey": "testvalue"}, } @@ -749,7 +749,7 @@ func TestAgentBackendTapsRetrieve(t *testing.T) { MFThingID: thID.String(), MFOwnerID: oID.String(), MFChannelID: chID.String(), - OrbTags: types.Tags{"testkey": "testvalue"}, + OrbTags: &types.Tags{"testkey": "testvalue"}, AgentTags: types.Tags{"testkey": "testvalue"}, AgentMetadata: types.Metadata{"testkey": "testvalue"}, } @@ -815,7 +815,7 @@ func TestMultiAgentRetrievalByAgentGroup(t *testing.T) { Name: groupNameID, MFOwnerID: oID.String(), MFChannelID: chID.String(), - Tags: tags, + Tags: &tags, } id, err := agentGroupRepo.Save(context.Background(), group) @@ -838,7 +838,7 @@ func TestMultiAgentRetrievalByAgentGroup(t *testing.T) { require.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) th.AgentMetadata = metadata th.AgentTags = tags - th.OrbTags = subTags + th.OrbTags = &subTags th.State = fleet.Online err = agentRepo.Save(context.Background(), th) @@ -914,7 +914,7 @@ func TestAgentRetrieveByID(t *testing.T) { MFThingID: thID.String(), MFOwnerID: oID.String(), MFChannelID: chID.String(), - OrbTags: types.Tags{"testkey": "testvalue"}, + OrbTags: &types.Tags{"testkey": "testvalue"}, AgentTags: types.Tags{"testkey": "testvalue"}, AgentMetadata: types.Metadata{"testkey": "testvalue"}, } @@ -953,7 +953,7 @@ func TestAgentRetrieveByID(t *testing.T) { assert.Equal(t, nameID, ag.Name, fmt.Sprintf("%s: expected %s got %s\n", desc, nameID, ag.Name)) } if len(tc.tags) > 0 { - assert.Equal(t, tc.tags, ag.OrbTags) + assert.Equal(t, tc.tags, *ag.OrbTags) assert.Equal(t, tc.tags, ag.AgentTags) } assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", desc, tc.err, err)) @@ -982,7 +982,7 @@ func TestRetrieveAgentInfoByChannelID(t *testing.T) { MFThingID: thID.String(), MFOwnerID: oID.String(), MFChannelID: chID.String(), - OrbTags: types.Tags{"testkey": "testvalue"}, + OrbTags: &types.Tags{"testkey": "testvalue"}, AgentTags: types.Tags{"testkey": "testvalue"}, AgentMetadata: types.Metadata{"testkey": "testvalue"}, } @@ -1004,7 +1004,7 @@ func TestRetrieveAgentInfoByChannelID(t *testing.T) { ownerID: oID.String(), name: nameID.String(), agentTags: agent.AgentTags, - orbTags: agent.OrbTags, + orbTags: *agent.OrbTags, agentID: agent.MFThingID, err: nil, }, @@ -1026,7 +1026,7 @@ func TestRetrieveAgentInfoByChannelID(t *testing.T) { assert.Equal(t, tc.name, ag.Name.String(), fmt.Sprintf("%s: expected %s got %s\n", desc, tc.name, ag.Name.String())) 
assert.Equal(t, tc.ownerID, ag.MFOwnerID, fmt.Sprintf("%s: expected %s got %s\n", desc, tc.ownerID, ag.MFOwnerID)) assert.Equal(t, tc.agentTags, ag.AgentTags, fmt.Sprintf("%s: expected %s got %s\n", desc, tc.agentTags, ag.AgentTags)) - assert.Equal(t, tc.orbTags, ag.OrbTags, fmt.Sprintf("%s: expected %s got %s\n", desc, tc.orbTags, ag.OrbTags)) + assert.Equal(t, tc.orbTags, *ag.OrbTags, fmt.Sprintf("%s: expected %s got %s\n", desc, tc.orbTags, *ag.OrbTags)) assert.Equal(t, tc.agentID, ag.MFThingID, fmt.Sprintf("%s: expected %s got %s\n", desc, tc.agentID, ag.MFThingID)) } assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", desc, tc.err, err)) @@ -1089,7 +1089,7 @@ func TestMatchingAgentRetrieval(t *testing.T) { require.True(t, th.Name.IsValid(), "invalid Identifier name: %s") require.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) th.AgentTags = agentTags - th.OrbTags = orbTags + th.OrbTags = &orbTags err = agentRepo.Save(context.Background(), th) require.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) @@ -1178,7 +1178,7 @@ func TestSetAgentStale(t *testing.T) { MFThingID: thID.String(), MFOwnerID: oID.String(), MFChannelID: chID.String(), - OrbTags: types.Tags{"testkey": "testvalue"}, + OrbTags: &types.Tags{"testkey": "testvalue"}, AgentTags: types.Tags{"testkey": "testvalue"}, AgentMetadata: types.Metadata{"testkey": "testvalue"}, LastHB: time.Now().Add(fleet.DefaultTimeout), diff --git a/go.mod b/go.mod index 73a76a10f..134c89e6b 100644 --- a/go.mod +++ b/go.mod @@ -26,6 +26,7 @@ require ( github.com/opentracing/opentracing-go v1.2.0 github.com/ory/dockertest/v3 v3.8.1 github.com/pkg/profile v1.7.0 + github.com/plgd-dev/kit/v2 v2.0.0-20211006190727-057b33161b90 github.com/prometheus/client_golang v1.13.0 github.com/prometheus/prometheus v0.38.0 github.com/rubenv/sql-migrate v1.1.1 @@ -40,6 +41,8 @@ require ( google.golang.org/protobuf v1.28.1 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 + k8s.io/api v0.26.0 + k8s.io/apimachinery v0.26.0 ) //These libs are used to allow orb extend opentelemetry features @@ -54,7 +57,7 @@ require ( go.opentelemetry.io/otel/metric v0.32.1 go.opentelemetry.io/otel/trace v1.10.0 google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de - k8s.io/client-go v0.25.2 + k8s.io/client-go v0.26.0 ) require ( @@ -175,6 +178,8 @@ require ( github.com/spf13/pflag v1.0.5 // indirect github.com/subosito/gotenv v1.4.1 // indirect github.com/uber/jaeger-lib v2.4.1+incompatible // indirect + github.com/ugorji/go v1.1.7 // indirect + github.com/ugorji/go/codec v1.1.7 // indirect github.com/vultr/govultr/v2 v2.17.2 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect @@ -186,22 +191,20 @@ require ( go.uber.org/goleak v1.2.0 // indirect golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa // indirect golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect - golang.org/x/net v0.0.0-20220927171203-f486391704dc // indirect + golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10 // indirect golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1 // indirect golang.org/x/sync v0.0.0-20220923202941-7f9b1623fab7 // indirect - golang.org/x/sys v0.0.0-20220909162455-aba9fc2a8ff2 // indirect - golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect - golang.org/x/text v0.3.7 // indirect + golang.org/x/sys v0.3.0 // indirect + golang.org/x/term v0.3.0 // indirect + golang.org/x/text v0.5.0 // indirect golang.org/x/time 
v0.0.0-20220722155302-e5dcc9cfc0b9 // indirect golang.org/x/tools v0.1.12 // indirect google.golang.org/api v0.98.0 // indirect google.golang.org/appengine v1.6.7 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect - k8s.io/api v0.25.2 // indirect - k8s.io/apimachinery v0.25.2 // indirect - k8s.io/klog/v2 v2.70.1 // indirect - k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed // indirect + k8s.io/klog/v2 v2.80.1 // indirect + k8s.io/utils v0.0.0-20221107191617-1a15be271d1d // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect sigs.k8s.io/yaml v1.3.0 // indirect ) @@ -214,7 +217,7 @@ require ( github.com/eapache/go-resiliency v1.3.0 // indirect github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 // indirect github.com/eapache/queue v1.1.0 // indirect - github.com/emicklei/go-restful/v3 v3.8.0 // indirect + github.com/emicklei/go-restful/v3 v3.9.0 // indirect github.com/felixge/httpsnoop v1.0.3 // indirect github.com/go-openapi/jsonpointer v0.19.5 // indirect github.com/go-openapi/jsonreference v0.20.0 // indirect @@ -245,6 +248,6 @@ require ( github.com/xdg-go/stringprep v1.0.3 // indirect go.opentelemetry.io/collector/semconv v0.62.0 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.1 // indirect - k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect + k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect ) diff --git a/go.sum b/go.sum index 01b27555d..9a9c1e525 100644 --- a/go.sum +++ b/go.sum @@ -147,6 +147,7 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= @@ -223,6 +224,8 @@ github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5Xh github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dsnet/golib/memfile v0.0.0-20190531212259-571cdbcff553/go.mod h1:tXGNW9q3RwvWt1VV2qrRKlSSz0npnh12yftCSCy2T64= +github.com/dsnet/golib/memfile v0.0.0-20200723050859-c110804dfa93/go.mod h1:tXGNW9q3RwvWt1VV2qrRKlSSz0npnh12yftCSCy2T64= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eapache/go-resiliency v1.3.0 h1:RRL0nge+cWGlxXbUzJ7yMcq6w2XBEr19dCN6HECGaT0= github.com/eapache/go-resiliency v1.3.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= @@ -232,8 +235,8 @@ github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/eclipse/paho.mqtt.golang v1.4.1 h1:tUSpviiL5G3P9SZZJPC4ZULZJsxQKXxfENpMvdbAXAI= 
github.com/eclipse/paho.mqtt.golang v1.4.1/go.mod h1:JGt0RsEwEX+Xa/agj90YJ9d9DH2b7upDZMK9HRbFvCA= -github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw= -github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= +github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -271,6 +274,7 @@ github.com/fxamacker/cbor/v2 v2.4.0 h1:ri0ArlOR+5XunOP8CRUowT0pSJOwhW098ZCUyskZD github.com/fxamacker/cbor/v2 v2.4.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-acme/lego v2.7.2+incompatible/go.mod h1:yzMNe9CasVUhkquNvti5nAtPmG94USbYxYrZfTkIn0M= github.com/go-cmd/cmd v1.3.0 h1:Wet2eYkLouFqyiG+x6P6l8CICRywhRD6sjMNalTSvbs= github.com/go-cmd/cmd v1.3.0/go.mod h1:l/X/csRuYRDqiQIz9PPJBn4xDrdxgBXeLE9x1BeFU6M= github.com/go-co-op/gocron v1.9.0 h1:+V+DDenw3ryB7B+tK1bAIC5p0ruw4oX9IqAsdRnGIf0= @@ -294,13 +298,14 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-ocf/go-coap/v2 v2.0.4-0.20200728125043-f38b86f047a7/go.mod h1:X9wVKcaOSx7wBxKcvrWgMQq1R2DNeA7NBLW2osIb8TM= +github.com/go-ocf/kit v0.0.0-20200728130040-4aebdb6982bc/go.mod h1:TIsoMT/iB7t9P6ahkcOnsmvS83SIJsv9qXRfz/yLf6M= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= @@ -337,10 +342,12 @@ github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJA github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godror/godror v0.24.2/go.mod h1:wZv/9vPiUib6tkoDl+AZ/QLf5YZgMravZ7jxH2eQWAE= +github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid v4.2.0+incompatible h1:yyYWMnhkhrKwwr8gAOcOCYxOOscHgDS9yZgBrnJfGa0= github.com/gofrs/uuid 
v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= @@ -457,6 +464,7 @@ github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8 github.com/gophercloud/gophercloud v0.25.0 h1:C3Oae7y0fUVQGSsBrb3zliAjdX+riCSEh4lNMejFNI4= github.com/gophercloud/gophercloud v0.25.0/go.mod h1:Q8fZtyi5zZxPS/j9aj3sSxtvj41AdQMDwyo1myduD5c= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= @@ -466,6 +474,7 @@ github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/ad github.com/grafana/regexp v0.0.0-20220304095617-2e8d9baf4ac2 h1:uirlL/j72L93RhV4+mkWhjv0cov2I0MIgPOG9rMDr1k= github.com/grafana/regexp v0.0.0-20220304095617-2e8d9baf4ac2/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= @@ -582,6 +591,7 @@ github.com/jcmturner/gokrb5/v8 v8.4.3 h1:iTonLeSJOn7MVUtyMT+arAn5AKAPrkilzhGw8wE github.com/jcmturner/gokrb5/v8 v8.4.3/go.mod h1:dqRwJGXznQrzw6cWmyo6kH+E7jksEQG/CyVWsJEsJO0= github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= @@ -608,9 +618,12 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/karrick/godirwalk v1.16.1 h1:DynhcF+bztK8gooS0+NDJFrdNZjJ3gzVzC545UNA9iw= github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= +github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= 
+github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.10.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.15.11 h1:Lcadnb3RKGin4FYM/orgq0qde+nc15E5Cbqg4B9Sx9c= github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= github.com/knadh/koanf v1.4.3 h1:rSJcSH5LSFhvzBRsAYfT3k7eLP0I4UxeZqjtAatk+wc= @@ -630,6 +643,9 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lestrrat-go/iter v0.0.0-20200422075355-fc1769541911/go.mod h1:zIdgO1mRKhn8l9vrZJZz9TUMMFbQbLeTsbqPDrJ/OJc= +github.com/lestrrat-go/jwx v1.0.2/go.mod h1:TPF17WiSFegZo+c20fdpw49QD+/7n4/IsGvEmCSWwT0= +github.com/lestrrat-go/pdebug v0.0.0-20200204225717-4d6bd78da58d/go.mod h1:B06CSso/AWxiPejj+fheUINGeBKeeEZNt8w+EoU7+L8= github.com/lib/pq v0.0.0-20180327071824-d34b9ff171c2/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= @@ -680,6 +696,7 @@ github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182aff github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/miekg/dns v1.1.29/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA= github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= @@ -752,11 +769,11 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= -github.com/onsi/ginkgo/v2 v2.1.6 h1:Fx2POJZfKRQcM1pH49qSZiYeu319wji004qX+GDovrU= +github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q= +github.com/onsi/gomega v1.23.0 h1:/oxKu9c2HVap+F3PfKort2Hw5DEU+HGlW8n+tguWsys= github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.62.0 h1:EoidrEk6Dmap+Cw+lXipNL7IVGicS0N6V+oCvesIj/c= github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.62.0/go.mod h1:4BwkK9Fb1xZDxmXt7gSm5nxCxtVWJf61/UaCt54gVjU= 
github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.62.0 h1:PMUgwDspM+2DX2Ol8Tj/jUBQqzvykVwnFily/HjRDPA= @@ -787,15 +804,18 @@ github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6i github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/ory/dockertest/v3 v3.8.1 h1:vU/8d1We4qIad2YM0kOwRVtnyue7ExvacPiw1yDm17g= github.com/ory/dockertest/v3 v3.8.1/go.mod h1:wSRQ3wmkz+uSARYMk7kVJFDBGm8x5gSxIhI7NDc+BAQ= github.com/ory/keto/proto/ory/keto/acl/v1alpha1 v0.0.0-20210616104402-80e043246cf9 h1:gP86NkMkUlqMOTjFQ8lt8T1HbHtCJGGeeeh/6c+nla0= github.com/ory/keto/proto/ory/keto/acl/v1alpha1 v0.0.0-20210616104402-80e043246cf9/go.mod h1:8IoeBQqIRKWU5L6dTKQTlTwVhlUawpqSBJZWfLLN4FM= +github.com/panjf2000/ants/v2 v2.4.3/go.mod h1:f6F0NZVFsGCp5A7QW/Zj/m92atWwOkY0OIhFxRNFr4A= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= @@ -807,6 +827,13 @@ github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaF github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4/v4 v4.1.17 h1:kV4Ip+/hUBC+8T6+2EgburRtkE9ef4nbY3f4dFhGjMc= github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pion/dtls/v2 v2.0.1-0.20200503085337-8e86b3a7d585/go.mod h1:/GahSOC8ZY/+17zkaGJIG4OUkSGAcZu/N/g3roBOCkM= +github.com/pion/dtls/v2 v2.0.10-0.20210502094952-3dc563b9aede/go.mod h1:86wv5dgx2J/z871nUR+5fTTY9tISLUlo+C5Gm86r1Hs= +github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms= +github.com/pion/transport v0.10.0/go.mod h1:BnHnUipd0rZQyTVB2SBGojFHT9CBt5C5TcsJSQGkvSE= +github.com/pion/transport v0.12.2/go.mod h1:N3+vZQD9HlDP5GWkZ85LohxNsDcNgofQmyL6ojX5d8Q= +github.com/pion/transport v0.12.3/go.mod h1:OViWW9SP2peE/HbwBvARicmAVnesphkNkCVZIWJ6q9A= +github.com/pion/udp v0.1.1/go.mod h1:6AFo+CMdKQm7UiA0eUPA8/eVCTx8jBIITLZHc9DWX5M= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -816,6 +843,11 @@ github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA= github.com/pkg/profile 
v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/plgd-dev/go-coap/v2 v2.0.4-0.20200819112225-8eb712b901bc/go.mod h1:+tCi9Q78H/orWRtpVWyBgrr4vKFo2zYtbbxUllerBp4= +github.com/plgd-dev/go-coap/v2 v2.4.1-0.20210517130748-95c37ac8e1fa/go.mod h1:rA7fc7ar+B/qa+Q0hRqv7yj/EMtIlmo1l7vkQGSrHPU= +github.com/plgd-dev/kit v0.0.0-20200819113605-d5fcf3e94f63/go.mod h1:Yl9zisyXfPdtP9hTWlJqjJYXmgU/jtSDKttz9/CeD90= +github.com/plgd-dev/kit/v2 v2.0.0-20211006190727-057b33161b90 h1:TC1HJ/UbyflJFPvaOdGmNZ5TeFGex1/dyr9urNGLy7M= +github.com/plgd-dev/kit/v2 v2.0.0-20211006190727-057b33161b90/go.mod h1:Z7oKFLSGQjdi8eInxwFCs0tSApuEM1o0qNck+sJYp4M= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= @@ -901,7 +933,6 @@ github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9 github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/afero v1.8.2 h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo= @@ -958,7 +989,14 @@ github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMW github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg= github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.12.0/go.mod h1:229t1eWu9UXTPmoUkbpN/fctKPBY4IJoFXQnxHGXy6E= +github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs= @@ -1025,16 +1063,20 @@ go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/A go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.uber.org/atomic v1.4.0/go.mod 
h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.23.0 h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY= go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY= @@ -1047,6 +1089,8 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= @@ -1136,6 +1180,7 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201201195509-5d6afe98e0b7/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= @@ -1144,6 +1189,7 @@ golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLd golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod 
h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210502030024-e5908800b52b/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= @@ -1160,8 +1206,8 @@ golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220725212005-46097bf591d3/go.mod h1:AaygXjzTFtRAg2ttMY5RMuhpJ3cNnI0XpyFJD1iQRSM= golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.0.0-20220927171203-f486391704dc h1:FxpXZdoBqT8RjqTy6i1E8nXHhW21wK7ptQ/EPIGxzPQ= -golang.org/x/net v0.0.0-20220927171203-f486391704dc/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10 h1:Frnccbp+ok2GkUS2tC84yAq/U9Vg+0sIO7aRL3T4Xnc= +golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1303,11 +1349,12 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220909162455-aba9fc2a8ff2 h1:wM1k/lXfpc5HdkJJyW9GELpd8ERGdnh8sMGL6Gzq3Ho= -golang.org/x/sys v0.0.0-20220909162455-aba9fc2a8ff2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1317,8 +1364,9 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1326,6 +1374,7 @@ golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9 h1:ftMN5LMiBFjbzleLqtoBZk7K golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -1343,12 +1392,15 @@ golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -1362,6 +1414,7 @@ golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod 
h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200417140056-c07e33ef3290/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -1603,6 +1656,7 @@ gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= @@ -1611,6 +1665,7 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= @@ -1631,19 +1686,18 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.25.2 h1:v6G8RyFcwf0HR5jQGIAYlvtRNrxMJQG1xJzaSeVnIS8= -k8s.io/api v0.25.2/go.mod h1:qP1Rn4sCVFwx/xIhe+we2cwBLTXNcheRyYXwajonhy0= -k8s.io/apimachinery v0.25.2 h1:WbxfAjCx+AeN8Ilp9joWnyJ6xu9OMeS/fsfjK/5zaQs= -k8s.io/apimachinery v0.25.2/go.mod h1:hqqA1X0bsgsxI6dXsJ4HnNTBOmJNxyPp8dw3u2fSHwA= -k8s.io/client-go v0.25.2 h1:SUPp9p5CwM0yXGQrwYurw9LWz+YtMwhWd0GqOsSiefo= -k8s.io/client-go v0.25.2/go.mod h1:i7cNU7N+yGQmJkewcRD2+Vuj4iz7b30kI8OcL3horQ4= -k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.70.1 h1:7aaoSdahviPmR+XkS7FyxlkkXs6tHISSG03RxleQAVQ= -k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 h1:MQ8BAZPZlWk3S9K4a9NCkIFQtZShWqoha7snGixVgEA= -k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU= -k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed h1:jAne/RjBTyawwAy0utX5eqigAwz/lQhTmy+Hr/Cpue4= -k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod 
h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/api v0.26.0 h1:IpPlZnxBpV1xl7TGk/X6lFtpgjgntCg8PJ+qrPHAC7I= +k8s.io/api v0.26.0/go.mod h1:k6HDTaIFC8yn1i6pSClSqIwLABIcLV9l5Q4EcngKnQg= +k8s.io/apimachinery v0.26.0 h1:1feANjElT7MvPqp0JT6F3Ss6TWDwmcjLypwoPpEf7zg= +k8s.io/apimachinery v0.26.0/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74= +k8s.io/client-go v0.26.0 h1:lT1D3OfO+wIi9UFolCrifbjUUgu7CpLca0AD8ghRLI8= +k8s.io/client-go v0.26.0/go.mod h1:I2Sh57A79EQsDmn7F7ASpmru1cceh3ocVT9KlX2jEZg= +k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= +k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E= +k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= +k8s.io/utils v0.0.0-20221107191617-1a15be271d1d h1:0Smp/HP1OH4Rvhe+4B8nWGERtlqAGSftbSbbmm45oFs= +k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/kind/Chart.lock b/kind/Chart.lock index 4e8b08b1c..53732b11b 100644 --- a/kind/Chart.lock +++ b/kind/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: orb - repository: https://ns1labs.github.io/orb-helm/ - version: 1.0.39 -digest: sha256:d7ae1413691620cf6f7a03fff2b562a9ca79fc5da1d1f4892ed364d45486d56d -generated: "2022-12-19T21:18:59.933210512-03:00" + repository: https://orb-community.github.io/orb-helm/ + version: 1.0.44 +digest: sha256:054a0e4810a7d857f4c0b156bb92e909f485096242098f62ab5b558140e48a22 +generated: "2023-02-13T13:18:58.67925487-03:00" diff --git a/kind/Chart.yaml b/kind/Chart.yaml index 92074223f..267789e22 100644 --- a/kind/Chart.yaml +++ b/kind/Chart.yaml @@ -17,5 +17,5 @@ appVersion: "1.0.0" dependencies: - name: orb - version: "1.0.39" - repository: "@ns1labs-orb" + version: "1.0.44" + repository: "@orb-community" diff --git a/kind/README.md b/kind/README.md index a5aaa5933..aafc51dc4 100644 --- a/kind/README.md +++ b/kind/README.md @@ -1,6 +1,7 @@ # Orb local k8s cluster -Follow those steps to setup a local k8s cluster and deploy Orb. +The following steps must be performed at the **root of the Orb project** to set up a local k8s cluster and deploy Orb. + ## 🧱 Requirements @@ -11,6 +12,8 @@ Follow those steps to setup a local k8s cluster and deploy Orb. > **💡 Note:** If you have those installed, please skip to [Deploy Orb on Kind](#deploy-orb-kind). +> ⚠️ You may need to permit ports 80 and 443 (*ingress*) because of [kubernetes](https://kubernetes.io/docs/concepts/services-networking/ingress/). + ## 🐳 Docker Environment (Requirement) @@ -23,11 +26,13 @@ Check if you have a **Docker** running by executing: ```shell docker version ``` -If you need help to setup a **Docker Environment**, follow the [steps from here](https://docs.docker.com/engine/install/debian/). +If you need help to set up a **Docker Environment**, follow the [steps from here](https://docs.docker.com/engine/install/debian/). ## ⚓ Helm 3 (Requirement) +[Helm](https://helm.sh/) is a package manager for Kubernetes. A Helm Chart is a package that allows you to customize your deployment on Kubernetes. 
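Since the kind chart now resolves its `orb` dependency from the orb-community repository, re-syncing a local Helm 3 setup after this move might look roughly like the sketch below; the `orb-community` alias mirrors the `@orb-community` entry in `kind/Chart.yaml`, and the `./kind` path assumes the commands are run from the project root as described above.

```shell
# Point Helm at the relocated chart repository (alias matches "@orb-community" in kind/Chart.yaml)
helm repo add orb-community https://orb-community.github.io/orb-helm/
helm repo update

# Re-resolve the kind chart's dependencies so Chart.lock picks up orb 1.0.44
helm dependency update ./kind
```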
+ Quick install a **Helm 3** executing: ```shell make install-helm @@ -39,7 +44,7 @@ helm version ``` If you need help to install **Helm 3**, follow the [steps from here](https://helm.sh/docs/intro/install/). -> 🚨 **Warning:** Make sure you have version 3 installed, orb helm charts doesn't officialy support helm 2. +> 🚨 **Warning:** Make sure you have version 3 installed, orb helm charts doesn't officially support helm 2. ## 🐋 Kubectl (Requirement) @@ -169,7 +174,7 @@ Load just one image to the kind cluster kind load docker-image ns1labs/orb-maestro:0.22.0-088bee14 ``` -> **💡 Note:** Dont forget to change **kind/values.yaml** manifest to use your image tag +> **💡 Note:** Do not forget to change **kind/values.yaml** manifest to use your image tag Install orb application: @@ -188,7 +193,7 @@ kubectl rollout restart deployment -n orb ## Updating inflight service with recent development -If you want to change a service, lets say you added some logs to the fleet service, before commiting the changes, add this +If you want to change a service, lets say you added some logs to the fleet service, before committing the changes, add this ```shell SERVICE=fleet make build_docker ``` @@ -197,3 +202,6 @@ After changing you can simply execute ```shell make kind-upgrade-all ``` + +
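Putting the steps from this section together, a full local iteration on a single service might look like the following sketch; the image name and tag are purely illustrative, so use whatever tag your build actually produces and reference it in `kind/values.yaml`.

```shell
# Rebuild only the fleet service image
SERVICE=fleet make build_docker

# Load the freshly built image into the kind cluster (image name and tag are examples)
kind load docker-image ns1labs/orb-fleet:0.22.0-develop

# Apply the change to the running cluster
make kind-upgrade-all
```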
+❌ Is it not working correctly? Found a bug? Come talk to us [live on Slack](https://netdev.chat/) in the `#orb` channel, or [file a GitHub issue here](https://github.com/orb-community/orb/issues/new/choose). diff --git a/kind/values.yaml b/kind/values.yaml index 10ae99083..44775a650 100644 --- a/kind/values.yaml +++ b/kind/values.yaml @@ -110,17 +110,17 @@ orb: repository: ns1labs tag: "develop" #0.22.0-develop-478d318 rbac: - create: true serviceAccountName: "k8s-maestro-role" serviceAccountNamespace: "orb" ClusterRole: "cluster-admin" + ClusterRoleBindingCreate: true migrate: image: name: "orb-migrate" pullPolicy: "IfNotPresent" repository: ns1labs - tag: "0.17.0-11889e3a" + tag: "develop" autoMigrate: false logLevel: "debug" diff --git a/maestro/config/config_builder.go b/maestro/config/config_builder.go index 3850aed6f..2e65815d3 100644 --- a/maestro/config/config_builder.go +++ b/maestro/config/config_builder.go @@ -80,7 +80,7 @@ var k8sOtelCollector = ` "containers": [ { "name": "otel-collector", - "image": "otel/opentelemetry-collector-contrib:0.60.0", + "image": "otel/opentelemetry-collector-contrib:0.68.0", "ports": [ { "containerPort": 13133, @@ -186,6 +186,182 @@ var k8sOtelCollector = ` } ` +var JsonService = ` +{ + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "otel-SINK_ID", + "creationTimestamp": null, + "labels": { + "app": "opentelemetry", + "component": "otel-collector-SINK_ID" + } + }, + "spec": { + "ports": [ + { + "name": "metrics", + "protocol": "TCP", + "port": 8888, + "targetPort": 8888 + }, + { + "name": "healthcheck", + "protocol": "TCP", + "port": 13133, + "targetPort": 13133 + } + ], + "selector": { + "component": "otel-collector-SINK_ID" + }, + "type": "ClusterIP", + "sessionAffinity": "None" + }, + "status": { + "loadBalancer": { + } + } +} +` + +var JsonConfigMap = ` +{ + "kind": "ConfigMap", + "apiVersion": "v1", + "metadata": { + "name": "otel-collector-config-SINK_ID", + "creationTimestamp": null + }, + "data": { + "config.yaml": "SINK_CONFIG" + } +} +` + +var JsonDeployment = ` +{ + "kind": "Deployment", + "apiVersion": "apps/v1", + "metadata": { + "name": "otel-SINK_ID", + "creationTimestamp": null, + "labels": { + "app": "opentelemetry", + "component": "otel-collector" + } + }, + "spec": { + "replicas": 1, + "selector": { + "matchLabels": { + "app": "opentelemetry", + "component": "otel-collector-SINK_ID" + } + }, + "template": { + "metadata": { + "creationTimestamp": null, + "labels": { + "app": "opentelemetry", + "component": "otel-collector-SINK_ID" + } + }, + "spec": { + "volumes": [ + { + "name": "varlog", + "hostPath": { + "path": "/var/log", + "type": "" + } + }, + { + "name": "varlibdockercontainers", + "hostPath": { + "path": "/var/lib/docker/containers", + "type": "" + } + }, + { + "name": "data", + "configMap": { + "name": "otel-collector-config-SINK_ID", + "defaultMode": 420 + } + } + ], + "containers": [ + { + "name": "otel-collector", + "image": "otel/opentelemetry-collector-contrib:0.68.0", + "ports": [ + { + "containerPort": 13133, + "protocol": "TCP" + }, + { + "containerPort": 8888, + "protocol": "TCP" + } + ], + "resources": { + "limits": { + "cpu": "100m", + "memory": "200Mi" + }, + "requests": { + "cpu": "100m", + "memory": "200Mi" + } + }, + "volumeMounts": [ + { + "name": "varlog", + "readOnly": true, + "mountPath": "/var/log" + }, + { + "name": "varlibdockercontainers", + "readOnly": true, + "mountPath": "/var/lib/docker/containers" + }, + { + "name": "data", + "readOnly": true, + "mountPath": 
"/etc/otelcol-contrib/config.yaml", + "subPath": "config.yaml" + } + ], + "terminationMessagePath": "/dev/termination-log", + "terminationMessagePolicy": "File", + "imagePullPolicy": "IfNotPresent" + } + ], + "restartPolicy": "Always", + "terminationGracePeriodSeconds": 30, + "dnsPolicy": "ClusterFirst", + "securityContext": { + }, + "schedulerName": "default-scheduler" + } + }, + "strategy": { + "type": "RollingUpdate", + "rollingUpdate": { + "maxUnavailable": "25%", + "maxSurge": "25%" + } + }, + "revisionHistoryLimit": 10, + "progressDeadlineSeconds": 600 + }, + "status": { + } + } +` + func GetDeploymentJson(kafkaUrl, sinkId, sinkUrl, sinkUsername, sinkPassword string) (string, error) { // prepare manifest manifest := strings.Replace(k8sOtelCollector, "SINK_ID", sinkId, -1) @@ -197,6 +373,26 @@ func GetDeploymentJson(kafkaUrl, sinkId, sinkUrl, sinkUsername, sinkPassword str return manifest, nil } +func GetDeploymentApplyConfig(sinkId string) string { + manifest := strings.Replace(JsonDeployment, "SINK_ID", sinkId, -1) + return manifest +} + +func GetConfigMapApplyConfig(kafkaUrl, sinkId, sinkUrl, sinkUsername, sinkPassword string) (string, error) { + manifest := strings.Replace(JsonConfigMap, "SINK_ID", sinkId, -1) + config, err := ReturnConfigYamlFromSink(context.Background(), kafkaUrl, sinkId, sinkUrl, sinkUsername, sinkPassword) + if err != nil { + return "", errors.Wrap(errors.New("failed to build YAML"), err) + } + manifest = strings.Replace(manifest, "SINK_CONFIG", config, -1) + return manifest, nil +} + +func GetServiceApplyConfig(sinkId string) string { + manifest := strings.Replace(JsonService, "SINK_ID", sinkId, -1) + return manifest +} + // ReturnConfigYamlFromSink this is the main method, which will generate the YAML file from the func ReturnConfigYamlFromSink(_ context.Context, kafkaUrlConfig, sinkId, sinkUrl, sinkUsername, sinkPassword string) (string, error) { config := OtelConfigFile{ @@ -208,9 +404,6 @@ func ReturnConfigYamlFromSink(_ context.Context, kafkaUrlConfig, sinkId, sinkUrl }, }, Extensions: &Extensions{ - HealthCheckExtConfig: &HealthCheckExtension{ - Endpoint: "0.0.0.0:13133", - }, PProf: &PProfExtension{ Endpoint: "0.0.0.0:1888", // Leaving default for now, will need to change with more processes }, @@ -228,9 +421,14 @@ func ReturnConfigYamlFromSink(_ context.Context, kafkaUrlConfig, sinkId, sinkUrl Authenticator string `json:"authenticator" yaml:"authenticator"` }{Authenticator: "basicauth/exporter"}, }, + LoggingExporter: &LoggingExporterConfig{ + Verbosity: "detailed", + SamplingInitial: 5, + SamplingThereAfter: 50, + }, }, Service: ServiceConfig{ - Extensions: []string{"pprof", "health_check", "basicauth/exporter"}, + Extensions: []string{"pprof", "basicauth/exporter"}, Pipelines: struct { Metrics struct { Receivers []string `json:"receivers" yaml:"receivers"` @@ -291,12 +489,15 @@ type Extensions struct { } type HealthCheckExtension struct { - Endpoint string `json:"endpoint" yaml:"endpoint"` - CollectorPipeline *struct { - Enabled bool `json:"enabled" yaml:"enabled"` - Interval string `json:"interval" yaml:"interval"` - FailureThreshold int32 `json:"exporter_failure_threshold" yaml:"exporter_failure_threshold"` - } `json:"check_collector_pipeline,omitempty" yaml:"check_collector_pipeline,omitempty"` + Endpoint string `json:"endpoint" yaml:"endpoint"` + Path string `json:"path" yaml:"path"` + CollectorPipeline *CollectorPipelineExtension `json:"check_collector_pipeline,omitempty" yaml:"check_collector_pipeline,omitempty"` +} + +type 
CollectorPipelineExtension struct { + Enabled string `json:"enabled" yaml:"enabled"` + Interval string `json:"interval" yaml:"interval"` + FailureThreshold int32 `json:"exporter_failure_threshold" yaml:"exporter_failure_threshold"` } type PProfExtension struct { @@ -316,6 +517,13 @@ type BasicAuthenticationExtension struct { type Exporters struct { PrometheusRemoteWrite *PrometheusRemoteWriteExporterConfig `json:"prometheusremotewrite,omitempty" yaml:"prometheusremotewrite,omitempty"` + LoggingExporter *LoggingExporterConfig `json:"logging,omitempty" yaml:"logging,omitempty"` +} + +type LoggingExporterConfig struct { + Verbosity string `json:"verbosity,omitempty" yaml:"verbosity,omitempty"` + SamplingInitial int `json:"sampling_initial,omitempty" yaml:"sampling_initial,omitempty"` + SamplingThereAfter int `json:"sampling_thereafter,omitempty" yaml:"sampling_thereafter,omitempty"` } type PrometheusRemoteWriteExporterConfig struct { diff --git a/maestro/config/config_builder_test.go b/maestro/config/config_builder_test.go index 81e381daa..3b13e0585 100644 --- a/maestro/config/config_builder_test.go +++ b/maestro/config/config_builder_test.go @@ -27,7 +27,7 @@ func TestReturnConfigYamlFromSink(t *testing.T) { sinkUrl: "https://mysinkurl:9922", sinkUsername: "1234123", sinkPassword: "CarnivorousVulgaris", - }, want: `---\nreceivers:\n kafka:\n brokers:\n - kafka:9092\n topic: otlp_metrics-sink-id-222\n protocol_version: 2.0.0\nextensions:\n health_check:\n endpoint: 0.0.0.0:13133\n pprof:\n endpoint: 0.0.0.0:1888\n basicauth/exporter:\n client_auth:\n username: 1234123\n password: CarnivorousVulgaris\nexporters:\n prometheusremotewrite:\n endpoint: https://mysinkurl:9922\n auth:\n authenticator: basicauth/exporter\nservice:\n extensions:\n - pprof\n - health_check\n - basicauth/exporter\n pipelines:\n metrics:\n receivers:\n - kafka\n exporters:\n - prometheusremotewrite\n`, + }, want: `---\nreceivers:\n kafka:\n brokers:\n - kafka:9092\n topic: otlp_metrics-sink-id-222\n protocol_version: 2.0.0\nextensions:\n pprof:\n endpoint: 0.0.0.0:1888\n basicauth/exporter:\n client_auth:\n username: 1234123\n password: CarnivorousVulgaris\nexporters:\n prometheusremotewrite:\n endpoint: https://mysinkurl:9922\n auth:\n authenticator: basicauth/exporter\n logging:\n verbosity: detailed\n sampling_initial: 5\n sampling_thereafter: 50\nservice:\n extensions:\n - pprof\n - basicauth/exporter\n pipelines:\n metrics:\n receivers:\n - kafka\n exporters:\n - prometheusremotewrite\n`, wantErr: false}, } for _, tt := range tests { diff --git a/maestro/config/types.go b/maestro/config/types.go index 1f7482f49..b24ead999 100644 --- a/maestro/config/types.go +++ b/maestro/config/types.go @@ -13,6 +13,7 @@ type SinkData struct { Password string `json:"password"` OpenTelemetry string `json:"opentelemetry"` State PrometheusState `json:"state,omitempty"` + Migrate string `json:"migrate,omitempty"` Msg string `json:"msg,omitempty"` LastRemoteWrite time.Time `json:"last_remote_write,omitempty"` } @@ -22,6 +23,7 @@ const ( Active Error Idle + Warning ) type PrometheusState int @@ -31,6 +33,7 @@ var promStateMap = [...]string{ "active", "error", "idle", + "warning", } var promStateRevMap = map[string]PrometheusState{ @@ -38,14 +41,15 @@ var promStateRevMap = map[string]PrometheusState{ "active": Active, "error": Error, "idle": Idle, + "warning": Warning, } func (p PrometheusState) String() string { return promStateMap[p] } -func (p *PrometheusState) Scan(value interface{}) error { - *p = 
promStateRevMap[string(value.([]byte))] +func (p *PrometheusState) SetFromString(value string) error { + *p = promStateRevMap[value] return nil } diff --git a/maestro/kubecontrol/config_parse.go b/maestro/kubecontrol/config_parse.go new file mode 100644 index 000000000..d51cc68f6 --- /dev/null +++ b/maestro/kubecontrol/config_parse.go @@ -0,0 +1,102 @@ +package kubecontrol + +// TODO Might need this in the future, keeping it for now +//import ( +// "fmt" +// k8scorev1 "k8s.io/api/core/v1" +// "k8s.io/apimachinery/pkg/api/resource" +// k8sv1acapps "k8s.io/client-go/applyconfigurations/apps/v1" +// k8sv1accore "k8s.io/client-go/applyconfigurations/core/v1" +// k8sv1acmeta "k8s.io/client-go/applyconfigurations/meta/v1" +//) +// +//func CreateDeploymentApplyConfig(otelConfigYaml, sinkID string) (k8sacv1apps.DeploymentApplyConfiguration, error) { +// _ := buildConfigMapEntry(otelConfigYaml, sinkID) +// deployment := buildDeployment(sinkID) +// +// return deployment, nil +//} +// +//func buildDeployment(sinkID string) k8sacv1apps.DeploymentApplyConfiguration { +// deploymentMetaName := fmt.Sprintf("otel-%s", sinkID) +// configMapNameMeta := fmt.Sprintf("otel-collector-config-%s", sinkID) +// deploymentMetaLabelComponent := fmt.Sprintf("otel-collector-%s", sinkID) +// labels := map[string]string{ +// "app": "opentelemetry", +// "component": deploymentMetaLabelComponent, +// } +// deployment := k8sacv1apps.DeploymentApplyConfiguration{ +// ObjectMetaApplyConfiguration: &k8sacv1meta.ObjectMetaApplyConfiguration{ +// Name: &deploymentMetaName, +// CreationTimestamp: nil, +// Labels: labels, +// }, +// } +// deploymentSpec := k8sacv1apps.DeploymentSpec() +// deploymentSpec.WithReplicas(1) +// selector := k8sacv1meta.LabelSelector() +// selector.WithMatchLabels(labels) +// deploymentSpec.WithSelector(selector) +// templateSpec := k8sacv1core.PodTemplateSpecApplyConfiguration{ +// ObjectMetaApplyConfiguration: getMetadata(deploymentMetaLabelComponent), +// } +// templateSpec.WithLabels(labels) +// podSpec := k8sacv1core.PodSpec() +// logVolume := k8sacv1core.Volume() +// logVolume.WithName("varlog") +// logVolume.WithHostPath(k8sacv1core.HostPathVolumeSource().WithPath("/var/log").WithType("")) +// containersVolume := k8sacv1core.Volume() +// containersVolume.WithName("varlibdockercontainers") +// containersVolume.WithHostPath(k8sacv1core.HostPathVolumeSource().WithPath("/var/lib/docker/containers").WithType("")) +// configVolume := k8sacv1core.Volume() +// configVolume.WithName("data").WithConfigMap(k8sacv1core.ConfigMapVolumeSource().WithName(configMapNameMeta).WithDefaultMode(420)) +// podSpec.WithVolumes(logVolume, containersVolume, configVolume) +// templateSpec.WithSpec(podSpec) +// containerPodSpec := k8sacv1core.Container() +// containerPodSpec.WithName("otel-collector").WithImage("otel/opentelemetry-collector-contrib:0.68.0") +// heathCheckPort := k8sacv1core.ContainerPort().WithContainerPort(13133).WithProtocol(k8scorev1.ProtocolTCP) +// pprofPort := k8sacv1core.ContainerPort().WithContainerPort(8888).WithProtocol(k8scorev1.ProtocolTCP) +// containerPodSpec.WithPorts(heathCheckPort, pprofPort) +// +// cpuQuantity := resource.NewQuantity(100, "m") +// memQuantity := resource.NewQuantity(200, "Mi") +// resourceReqs := k8sacv1core.ResourceRequirements().WithLimits(map[k8scorev1.ResourceName]resource.Quantity{ +// k8scorev1.ResourceCPU: *cpuQuantity, +// k8scorev1.ResourceMemory: *memQuantity, +// }).WithRequests(map[k8scorev1.ResourceName]resource.Quantity{ +// k8scorev1.ResourceCPU: 
*cpuQuantity, +// k8scorev1.ResourceMemory: *memQuantity, +// }) +// containerPodSpec.WithResources(resourceReqs) +// logVolumeMount := k8sacv1core.VolumeMount() +// logVolumeMount.WithName("varlog") +// containerPodSpec.WithVolumeMounts(k8sacv1core.VolumeMount().) +// podSpec.WithContainers() +// deploymentSpec.WithTemplate(&templateSpec) +// deployment.WithAPIVersion("k8sacv1core") +// deployment.WithKind("Deployment") +// deployment.WithSpec() +// return deployment +//} +// +//func buildConfigMapEntry(otelConfigYaml string, sinkID string) *k8sacv1core.ConfigMapApplyConfiguration { +// configMapNameMeta := fmt.Sprintf("otel-collector-config-%s", sinkID) +// configMapDataEntries := make(map[string]string) +// configMapDataEntries["config.yaml"] = otelConfigYaml +// metaApplyConfiguration := getMetadata(configMapNameMeta) +// configMap := k8sacv1core.ConfigMapApplyConfiguration{ +// ObjectMetaApplyConfiguration: metaApplyConfiguration, +// } +// configMap.WithKind("ConfigMap") +// configMap.WithAPIVersion("k8sacv1core") +// configMap.WithData(configMapDataEntries) +// return &configMap +//} +// +//func getMetadata(metadaName string) *k8sacv1meta.ObjectMetaApplyConfiguration { +// metaApplyConfiguration := &k8sacv1meta.ObjectMetaApplyConfiguration{ +// Name: &metadaName, +// CreationTimestamp: nil, +// } +// return metaApplyConfiguration +//} diff --git a/maestro/kubecontrol/kubecontrol.go b/maestro/kubecontrol/kubecontrol.go index 80ac58eff..97fed08c7 100644 --- a/maestro/kubecontrol/kubecontrol.go +++ b/maestro/kubecontrol/kubecontrol.go @@ -4,11 +4,17 @@ import ( "bufio" "context" "fmt" + _ "github.com/ns1labs/orb/maestro/config" + "github.com/ns1labs/orb/pkg/errors" + "go.uber.org/zap" + k8sappsv1 "k8s.io/api/apps/v1" + k8smetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" "os" "os/exec" "strings" - - "go.uber.org/zap" + "time" ) const namespace = "otelcollectors" @@ -16,113 +22,136 @@ const namespace = "otelcollectors" var _ Service = (*deployService)(nil) type deployService struct { - logger *zap.Logger - deploymentState map[string]bool + logger *zap.Logger + clientSet *kubernetes.Clientset } func NewService(logger *zap.Logger) Service { - deploymentState := make(map[string]bool) - return &deployService{logger: logger, deploymentState: deploymentState} + clusterConfig, err := rest.InClusterConfig() + if err != nil { + logger.Error("error on get cluster config", zap.Error(err)) + return nil + } + clientSet, err := kubernetes.NewForConfig(clusterConfig) + if err != nil { + logger.Error("error on get client", zap.Error(err)) + return nil + } + return &deployService{logger: logger, clientSet: clientSet} } type Service interface { // CreateOtelCollector - create an existing collector by id - CreateOtelCollector(ctx context.Context, sinkID, deploymentEntry string) error + CreateOtelCollector(ctx context.Context, ownerID, sinkID, deploymentEntry string) error // DeleteOtelCollector - delete an existing collector by id - DeleteOtelCollector(ctx context.Context, sinkID, deploymentEntry string) error + DeleteOtelCollector(ctx context.Context, ownerID, sinkID, deploymentEntry string) error // UpdateOtelCollector - update an existing collector by id - UpdateOtelCollector(ctx context.Context, sinkID, deploymentEntry string) error + UpdateOtelCollector(ctx context.Context, ownerID, sinkID, deploymentEntry string) error } -func (svc *deployService) collectorDeploy(_ context.Context, operation, sinkId, manifest string) error { - +func (svc 
*deployService) collectorDeploy(ctx context.Context, operation, ownerID, sinkId, manifest string) error { + _, status, err := svc.getDeploymentState(ctx, ownerID, sinkId) fileContent := []byte(manifest) tmp := strings.Split(string(fileContent), "\n") newContent := strings.Join(tmp[1:], "\n") - - if operation == "apply" { - if value, ok := svc.deploymentState[sinkId]; ok && value { - svc.logger.Info("Already applied Sink ID=" + sinkId) - return nil - } - } else if operation == "delete" { - if value, ok := svc.deploymentState[sinkId]; ok && !value { - svc.logger.Info("Already deleted Sink ID=" + sinkId) - return nil + if err != nil { + if status == "broken" { + operation = "delete" } } - - err := os.WriteFile("/tmp/otel-collector-"+sinkId+".json", []byte(newContent), 0644) + err = os.WriteFile("/tmp/otel-collector-"+sinkId+".json", []byte(newContent), 0644) if err != nil { svc.logger.Error("failed to write file content", zap.Error(err)) return err } + stdOutListenFunction := func(out *bufio.Scanner, err *bufio.Scanner) { + for out.Scan() { + svc.logger.Info("Deploy Info: " + out.Text()) + } + for err.Scan() { + svc.logger.Info("Deploy Error: " + err.Text()) + } + } // execute action cmd := exec.Command("kubectl", operation, "-f", "/tmp/otel-collector-"+sinkId+".json", "-n", namespace) + _, _, err = execCmd(ctx, cmd, svc.logger, stdOutListenFunction) + if err == nil { + svc.logger.Info(fmt.Sprintf("successfully %s the otel-collector for sink-id: %s", operation, sinkId)) + } + + return nil +} + +func execCmd(_ context.Context, cmd *exec.Cmd, logger *zap.Logger, stdOutFunc func(stdOut *bufio.Scanner, stdErr *bufio.Scanner)) (*bufio.Scanner, *bufio.Scanner, error) { stdoutReader, _ := cmd.StdoutPipe() stdoutScanner := bufio.NewScanner(stdoutReader) - go func() { - for stdoutScanner.Scan() { - fmt.Println(stdoutScanner.Text()) - svc.logger.Info("Deploy Info: " + stdoutScanner.Text()) - } - }() stderrReader, _ := cmd.StderrPipe() stderrScanner := bufio.NewScanner(stderrReader) - go func() { - for stderrScanner.Scan() { - fmt.Println(stderrScanner.Text()) - svc.logger.Info("Deploy Error: " + stderrScanner.Text()) - } - }() - err = cmd.Start() + go stdOutFunc(stdoutScanner, stderrScanner) + err := cmd.Start() if err != nil { - fmt.Printf("Error : %v \n", err) - svc.logger.Error("Collector Deploy Error", zap.Error(err)) + logger.Error("Collector Deploy Error", zap.Error(err)) } err = cmd.Wait() if err != nil { - fmt.Printf("Error: %v \n", err) - svc.logger.Error("Collector Deploy Error", zap.Error(err)) + logger.Error("Collector Deploy Error", zap.Error(err)) } + return stdoutScanner, stderrScanner, err +} - if err == nil { - if operation == "apply" { - svc.deploymentState[sinkId] = true - } else if operation == "delete" { - svc.deploymentState[sinkId] = false +func (svc *deployService) getDeploymentState(ctx context.Context, _, sinkId string) (deploymentName string, status string, err error) { + // Since this can take a while to be retrieved, we need to have a wait mechanism + for i := 0; i < 5; i++ { + deploymentList, err2 := svc.clientSet.AppsV1().Deployments(namespace).List(ctx, k8smetav1.ListOptions{}) + if err2 != nil { + svc.logger.Error("error on reading pods", zap.Error(err2)) + return "", "", err2 + } + for _, deployment := range deploymentList.Items { + if strings.Contains(deployment.Name, sinkId) { + svc.logger.Info("found deployment for sink") + deploymentName = deployment.Name + if len(deployment.Status.Conditions) == 0 || deployment.Status.Conditions[0].Type == 
k8sappsv1.DeploymentReplicaFailure { + svc.logger.Error("error on retrieving collector, deployment is broken") + return "", "broken", errors.New("error on retrieving collector, deployment is broken") + } + status = "active" + return + } } } - - return nil + status = "deleted" + return "", "deleted", nil } -func (svc *deployService) CreateOtelCollector(ctx context.Context, sinkID, deploymentEntry string) error { - err := svc.collectorDeploy(ctx, "apply", sinkID, deploymentEntry) - +func (svc *deployService) CreateOtelCollector(ctx context.Context, ownerID, sinkID, deploymentEntry string) error { + err := svc.collectorDeploy(ctx, "apply", ownerID, sinkID, deploymentEntry) if err != nil { return err } + return nil } -func (svc *deployService) UpdateOtelCollector(ctx context.Context, sinkID, deploymentEntry string) error { - err := svc.DeleteOtelCollector(ctx, sinkID, deploymentEntry) +func (svc *deployService) UpdateOtelCollector(ctx context.Context, ownerID, sinkID, deploymentEntry string) error { + err := svc.DeleteOtelCollector(ctx, ownerID, sinkID, deploymentEntry) if err != nil { return err } - err = svc.CreateOtelCollector(ctx, sinkID, deploymentEntry) + // Time to wait until K8s completely removes before re-creating + time.Sleep(3 * time.Second) + err = svc.CreateOtelCollector(ctx, ownerID, sinkID, deploymentEntry) if err != nil { return err } return nil } -func (svc *deployService) DeleteOtelCollector(ctx context.Context, sinkID, deploymentEntry string) error { - err := svc.collectorDeploy(ctx, "delete", sinkID, deploymentEntry) +func (svc *deployService) DeleteOtelCollector(ctx context.Context, ownerID, sinkID, deploymentEntry string) error { + err := svc.collectorDeploy(ctx, "delete", ownerID, sinkID, deploymentEntry) if err != nil { return err } diff --git a/maestro/monitor/monitor.go b/maestro/monitor/monitor.go new file mode 100644 index 000000000..874ba664e --- /dev/null +++ b/maestro/monitor/monitor.go @@ -0,0 +1,301 @@ +package monitor + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "io" + "strings" + "time" + + "github.com/ns1labs/orb/maestro/kubecontrol" + rediscons1 "github.com/ns1labs/orb/maestro/redis/consumer" + + maestroconfig "github.com/ns1labs/orb/maestro/config" + sinkspb "github.com/ns1labs/orb/sinks/pb" + "go.uber.org/zap" + k8scorev1 "k8s.io/api/core/v1" + k8smetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" +) + +const ( + idleTimeSeconds = 600 + TickerForScan = 1 * time.Minute + namespace = "otelcollectors" +) + +func NewMonitorService(logger *zap.Logger, sinksClient *sinkspb.SinkServiceClient, eventStore rediscons1.Subscriber, kubecontrol *kubecontrol.Service) Service { + return &monitorService{ + logger: logger, + sinksClient: *sinksClient, + eventStore: eventStore, + kubecontrol: *kubecontrol, + } +} + +type Service interface { + Start(ctx context.Context, cancelFunc context.CancelFunc) error + GetRunningPods(ctx context.Context) ([]string, error) +} + +type monitorService struct { + logger *zap.Logger + sinksClient sinkspb.SinkServiceClient + eventStore rediscons1.Subscriber + kubecontrol kubecontrol.Service +} + +func (svc *monitorService) Start(ctx context.Context, cancelFunc context.CancelFunc) error { + go func(ctx context.Context, cancelFunc context.CancelFunc) { + ticker := time.NewTicker(TickerForScan) + svc.logger.Info("start monitor routine", zap.Any("routine", ctx)) + defer func() { + cancelFunc() + svc.logger.Info("stopping monitor routine") + }() + for { + select { + case 
<-ctx.Done(): + cancelFunc() + return + case _ = <-ticker.C: + svc.logger.Info("monitoring sinks") + svc.monitorSinks(ctx) + } + } + }(ctx, cancelFunc) + return nil +} + +func (svc *monitorService) getPodLogs(ctx context.Context, pod k8scorev1.Pod) ([]string, error) { + maxTailLines := int64(10) + sinceSeconds := int64(300) + podLogOpts := k8scorev1.PodLogOptions{TailLines: &maxTailLines, SinceSeconds: &sinceSeconds} + config, err := rest.InClusterConfig() + if err != nil { + svc.logger.Error("error on get cluster config", zap.Error(err)) + return nil, err + } + clientSet, err := kubernetes.NewForConfig(config) + if err != nil { + svc.logger.Error("error on get client", zap.Error(err)) + return nil, err + } + req := clientSet.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &podLogOpts) + podLogs, err := req.Stream(ctx) + if err != nil { + svc.logger.Error("error on get logs", zap.Error(err)) + return nil, err + } + defer func(podLogs io.ReadCloser) { + err := podLogs.Close() + if err != nil { + svc.logger.Error("error closing log stream", zap.Error(err)) + } + }(podLogs) + + buf := new(bytes.Buffer) + _, err = io.Copy(buf, podLogs) + if err != nil { + svc.logger.Error("error on copying buffer", zap.Error(err)) + return nil, err + } + str := buf.String() + splitLogs := strings.Split(str, "\n") + svc.logger.Info("logs length", zap.Int("amount line logs", len(splitLogs))) + return splitLogs, nil +} + +func (svc *monitorService) GetRunningPods(ctx context.Context) ([]string, error) { + pods, err := svc.getRunningPods(ctx) + if err != nil { + svc.logger.Error("error getting running collectors") + return nil, err + } + runningSinks := make([]string, len(pods)) + if len(pods) > 0 { + for i, pod := range pods { + runningSinks[i] = strings.TrimPrefix(pod.Name, "otel-") + } + return runningSinks, nil + } + return nil, nil +} + +func (svc *monitorService) getRunningPods(ctx context.Context) ([]k8scorev1.Pod, error) { + config, err := rest.InClusterConfig() + if err != nil { + svc.logger.Error("error on get cluster config", zap.Error(err)) + return nil, err + } + clientSet, err := kubernetes.NewForConfig(config) + if err != nil { + svc.logger.Error("error on get client", zap.Error(err)) + return nil, err + } + pods, err := clientSet.CoreV1().Pods(namespace).List(ctx, k8smetav1.ListOptions{}) + return pods.Items, err +} + +func (svc *monitorService) monitorSinks(ctx context.Context) { + runningCollectors, err := svc.getRunningPods(ctx) + if err != nil { + svc.logger.Error("error getting running pods on namespace", zap.Error(err)) + return + } + if len(runningCollectors) == 0 { + svc.logger.Info("skipping, no running collectors") + return + } + sinksRes, err := svc.sinksClient.RetrieveSinks(ctx, &sinkspb.SinksFilterReq{OtelEnabled: "enabled"}) + if err != nil { + svc.logger.Error("error collecting sinks", zap.Error(err)) + return + } + svc.logger.Info("reading logs from collectors", zap.Int("collectors_length", len(sinksRes.Sinks))) + for _, collector := range runningCollectors { + var sink *sinkspb.SinkRes + for _, sinkRes := range sinksRes.Sinks { + if strings.Contains(collector.Name, sinkRes.Id) { + svc.logger.Warn("collector found for sink", zap.String("collector name", collector.Name), zap.String("sink", sinkRes.Id)) + sink = sinkRes + break + } + } + if sink == nil { + svc.logger.Warn("collector not found for sink, depleting collector", zap.String("collector name", collector.Name)) + sinkId := collector.Name[5:51] + deploymentEntry, err := svc.eventStore.GetDeploymentEntryFromSinkId(ctx, sinkId) + 
if err != nil { + svc.logger.Error("did not find collector entry for sink", zap.String("sink-id", sinkId)) + continue + } + err = svc.kubecontrol.DeleteOtelCollector(ctx, "", sinkId, deploymentEntry) + if err != nil { + svc.logger.Error("error removing otel collector", zap.Error(err)) + } + continue + } + var data maestroconfig.SinkData + if err := json.Unmarshal(sink.Config, &data); err != nil { + svc.logger.Warn("failed to unmarshal sink config, skipping", zap.String("sink-id", sink.Id)) + continue + } + data.SinkID = sink.Id + data.OwnerID = sink.OwnerID + // only analyze logs if current status is active + var logsErr error + var status string + if sink.GetState() == "active" { + logs, err := svc.getPodLogs(ctx, collector) + if err != nil { + svc.logger.Error("error on getting logs, skipping", zap.Error(err)) + continue + } + status, logsErr = svc.analyzeLogs(logs) + if status == "fail" { + svc.logger.Error("error during analyze logs", zap.Error(logsErr)) + continue + } + } + var lastActivity int64 + var activityErr error + if status == "active" { + lastActivity, activityErr = svc.eventStore.GetActivity(sink.Id) + // if logs reported 'active' status + // here we should check if LastActivity is up-to-date, otherwise we need to set sink as idle + var idleLimit int64 = 0 + if activityErr != nil || lastActivity == 0 { + svc.logger.Error("error on getting last collector activity", zap.Error(activityErr)) + continue + } else { + idleLimit = time.Now().Unix() - idleTimeSeconds // within 10 minutes + } + if idleLimit >= lastActivity { + //changing state on sinks + svc.eventStore.PublishSinkStateChange(sink, "idle", logsErr, err) + //changing state on redis sinker + data.State.SetFromString("idle") + svc.eventStore.UpdateSinkStateCache(ctx, data) + deploymentEntry, errDeploy := svc.eventStore.GetDeploymentEntryFromSinkId(ctx, sink.Id) + if errDeploy != nil { + svc.logger.Error("Remove collector: error on getting collector deployment from redis", zap.Error(activityErr)) + continue + } + err = svc.kubecontrol.DeleteOtelCollector(ctx, sink.OwnerID, sink.Id, deploymentEntry) + if err != nil { + svc.logger.Error("error removing otel collector", zap.Error(err)) + } + continue + } + } + //set the new sink status if changed during checks + if sink.GetState() != status && status != "" { + svc.logger.Info("changing sink status", zap.Any("before", sink.GetState()), zap.String("new status", status), zap.String("SinkID", sink.Id), zap.String("ownerID", sink.OwnerID)) + if err != nil { + svc.logger.Error("error updating status", zap.Any("before", sink.GetState()), zap.String("new status", status), zap.String("error_message (opt)", err.Error()), zap.String("SinkID", sink.Id), zap.String("ownerID", sink.OwnerID)) + } else { + svc.logger.Info("updating status", zap.Any("before", sink.GetState()), zap.String("new status", status), zap.String("SinkID", sink.Id), zap.String("ownerID", sink.OwnerID)) + // changing state on sinks + svc.eventStore.PublishSinkStateChange(sink, status, logsErr, err) + // changing state on redis sinker + data.State.SetFromString(status) + svc.eventStore.UpdateSinkStateCache(ctx, data) + } + } + } +} + +// analyzeLogs, will check for errors in exporter, and will return as follows +// for errors 429 will send a "warning" state, plus message of too many requests +// for any other errors, will add error and message +// if no error message on exporter, will log as active +// logs from otel-collector are coming in the standard from https://pkg.go.dev/log, +func (svc *monitorService) 
analyzeLogs(logEntry []string) (status string, err error) { + for _, logLine := range logEntry { + if len(logLine) > 24 { + // known errors + if strings.Contains(logLine, "401 Unauthorized") { + errorMessage := "error: remote write returned HTTP status 401 Unauthorized" + return "error", errors.New(errorMessage) + } + if strings.Contains(logLine, "404 Not Found") { + errorMessage := "error: remote write returned HTTP status 404 Not Found" + return "error", errors.New(errorMessage) + } + if strings.Contains(logLine, "Permanent error: remote write returned HTTP status 429 Too Many Requests") { + errorMessage := "error: remote write returned HTTP status 429 Too Many Requests" + return "warning", errors.New(errorMessage) + } + // other errors + if strings.Contains(logLine, "error") { + errStringLog := strings.TrimRight(logLine, "error") + if len(errStringLog) > 4 { + aux := strings.Split(errStringLog, "\t") + numItems := len(aux) + if numItems > 3 { + jsonError := aux[4] + errorJson := make(map[string]interface{}) + err := json.Unmarshal([]byte(jsonError), &errorJson) + if err != nil { + return "fail", err + } + if errorJson != nil && errorJson["error"] != nil { + errorMessage := errorJson["error"].(string) + return "error", errors.New(errorMessage) + } + } else { + return "error", errors.New("sink configuration error: please review your sink parameters") + } + } else { + return "error", errors.New("sink configuration error: please review your sink parameters") + } + } + } + } + // if nothing happens on logs is active + return "active", nil +} diff --git a/maestro/redis/consumer/events.go b/maestro/redis/consumer/events.go deleted file mode 100644 index bf955e3e4..000000000 --- a/maestro/redis/consumer/events.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -// Adapted for Orb project, modifications licensed under MPL v. 2.0: -/* This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at https://mozilla.org/MPL/2.0/. 
*/ -package consumer - -import ( - "github.com/ns1labs/orb/pkg/types" - "time" -) - -type sinksUpdateEvent struct { - sinkID string - owner string - config types.Metadata - timestamp time.Time -} - -type sinkerUpdateEvent struct { - ownerID string - sinkID string - state string - timestamp time.Time -} diff --git a/maestro/redis/consumer/hashset.go b/maestro/redis/consumer/hashset.go index fc022afc2..160d690a3 100644 --- a/maestro/redis/consumer/hashset.go +++ b/maestro/redis/consumer/hashset.go @@ -3,17 +3,27 @@ package consumer import ( "context" "encoding/json" + "errors" + "fmt" + "strconv" + "time" + + redis2 "github.com/go-redis/redis/v8" + "github.com/ns1labs/orb/maestro/config" + "github.com/ns1labs/orb/maestro/redis" "github.com/ns1labs/orb/pkg/types" sinkspb "github.com/ns1labs/orb/sinks/pb" "go.uber.org/zap" - "time" ) -const deploymentKey = "orb.sinks.deployment" +const ( + deploymentKey = "orb.sinks.deployment" + activityPrefix = "sinker_activity" +) func (es eventStore) GetDeploymentEntryFromSinkId(ctx context.Context, sinkId string) (string, error) { - cmd := es.client.HGet(ctx, deploymentKey, sinkId) + cmd := es.sinkerKeyRedisClient.HGet(ctx, deploymentKey, sinkId) if err := cmd.Err(); err != nil { es.logger.Error("error during redis reading of SinkId", zap.String("sink-id", sinkId), zap.Error(err)) return "", err @@ -22,30 +32,29 @@ func (es eventStore) GetDeploymentEntryFromSinkId(ctx context.Context, sinkId st } // handleSinksDeleteCollector will delete Deployment Entry and force delete otel collector -func (es eventStore) handleSinksDeleteCollector(ctx context.Context, event sinksUpdateEvent) error { - es.logger.Info("Received maestro DELETE event from sinks ID=" + event.sinkID + ", Owner ID=" + event.owner) - deployment, err := es.GetDeploymentEntryFromSinkId(ctx, event.sinkID) +func (es eventStore) handleSinksDeleteCollector(ctx context.Context, event redis.SinksUpdateEvent) error { + es.logger.Info("Received maestro DELETE event from sinks ID", zap.String("sinkID", event.SinkID), zap.String("owner", event.Owner)) + deploymentEntry, err := es.GetDeploymentEntryFromSinkId(ctx, event.SinkID) if err != nil { - es.logger.Error("did not find collector entry for sink", zap.String("sink-id", event.sinkID)) + es.logger.Error("did not find collector entry for sink", zap.String("sink-id", event.SinkID)) return err } - err = es.kubecontrol.DeleteOtelCollector(ctx, event.sinkID, deployment) + err = es.kubecontrol.DeleteOtelCollector(ctx, event.Owner, event.SinkID, deploymentEntry) if err != nil { return err } - es.client.HDel(ctx, deploymentKey, event.sinkID) return nil } // handleSinksCreateCollector will create Deployment Entry in Redis -func (es eventStore) handleSinksCreateCollector(ctx context.Context, event sinksUpdateEvent) error { - es.logger.Info("Received event to Create DeploymentEntry from sinks ID=" + event.sinkID + ", Owner ID=" + event.owner) +func (es eventStore) handleSinksCreateCollector(ctx context.Context, event redis.SinksUpdateEvent) error { + es.logger.Info("Received event to Create DeploymentEntry from sinks ID", zap.String("sinkID", event.SinkID), zap.String("owner", event.Owner)) sinkData, err := es.sinksClient.RetrieveSink(ctx, &sinkspb.SinkByIDReq{ - SinkID: event.sinkID, - OwnerID: event.owner, + SinkID: event.SinkID, + OwnerID: event.Owner, }) if err != nil { - es.logger.Error("could not fetch info for sink", zap.String("sink-id", event.sinkID), zap.Error(err)) + es.logger.Error("could not fetch info for sink", zap.String("sink-id", event.SinkID), 
zap.Error(err)) } var data config.SinkData if err := json.Unmarshal(sinkData.Config, &data); err != nil { @@ -54,7 +63,7 @@ func (es eventStore) handleSinksCreateCollector(ctx context.Context, event sinks sinkUrl := data.Url sinkUsername := data.User sinkPassword := data.Password - err2 := es.CreateDeploymentEntry(ctx, event.sinkID, sinkUrl, sinkUsername, sinkPassword) + err2 := es.CreateDeploymentEntry(ctx, event.SinkID, sinkUrl, sinkUsername, sinkPassword) if err2 != nil { return err2 } @@ -69,19 +78,19 @@ func (es eventStore) CreateDeploymentEntry(ctx context.Context, sinkId, sinkUrl, return err } - es.client.HSet(ctx, deploymentKey, sinkId, deploy) + es.sinkerKeyRedisClient.HSet(ctx, deploymentKey, sinkId, deploy) return nil } // handleSinksUpdateCollector will update Deployment Entry in Redis and force update otel collector -func (es eventStore) handleSinksUpdateCollector(ctx context.Context, event sinksUpdateEvent) error { - es.logger.Info("Received event to Update DeploymentEntry from sinks ID=" + event.sinkID + ", Owner ID=" + event.owner) +func (es eventStore) handleSinksUpdateCollector(ctx context.Context, event redis.SinksUpdateEvent) error { + es.logger.Info("Received event to Update DeploymentEntry from sinks ID", zap.String("sinkID", event.SinkID), zap.String("owner", event.Owner)) sinkData, err := es.sinksClient.RetrieveSink(ctx, &sinkspb.SinkByIDReq{ - SinkID: event.sinkID, - OwnerID: event.owner, + SinkID: event.SinkID, + OwnerID: event.Owner, }) if err != nil { - es.logger.Error("could not fetch info for sink", zap.String("sink-id", event.sinkID), zap.Error(err)) + es.logger.Error("could not fetch info for sink", zap.String("sink-id", event.SinkID), zap.Error(err)) } var data config.SinkData if err := json.Unmarshal(sinkData.Config, &data); err != nil { @@ -90,33 +99,117 @@ func (es eventStore) handleSinksUpdateCollector(ctx context.Context, event sinks sinkUrl := data.Url sinkUsername := data.User sinkPassword := data.Password - deploy, err := config.GetDeploymentJson(es.kafkaUrl, event.sinkID, sinkUrl, sinkUsername, sinkPassword) + deploy, err := config.GetDeploymentJson(es.kafkaUrl, event.SinkID, sinkUrl, sinkUsername, sinkPassword) if err != nil { - es.logger.Error("error trying to get deployment json for sink ID", zap.String("sinkId", event.sinkID)) + es.logger.Error("error trying to get deployment json for sink ID", zap.String("sinkId", event.SinkID)) return err } - es.client.HSet(ctx, deploymentKey, event.sinkID, deploy) - err = es.kubecontrol.UpdateOtelCollector(ctx, event.sinkID, deploy) + es.sinkerKeyRedisClient.HSet(ctx, deploymentKey, event.SinkID, deploy) + err = es.kubecontrol.UpdateOtelCollector(ctx, event.Owner, event.SinkID, deploy) + if err != nil { + return err + } + // changing state on updated sink to unknown + sinkData.OwnerID = event.Owner + es.PublishSinkStateChange(sinkData, "unknown", err, err) + data.SinkID = sinkData.Id + data.OwnerID = sinkData.OwnerID + data.State.SetFromString("unknown") + es.UpdateSinkStateCache(ctx, data) + return nil +} + +func (es eventStore) UpdateSinkCache(ctx context.Context, data config.SinkData) (err error) { + data.State = config.Unknown + keyPrefix := "sinker_key" + skey := fmt.Sprintf("%s-%s:%s", keyPrefix, data.OwnerID, data.SinkID) + bytes, err := json.Marshal(data) if err != nil { return err } + if err = es.sinkerKeyRedisClient.Set(ctx, skey, bytes, 0).Err(); err != nil { + return err + } + return +} +func (es eventStore) UpdateSinkStateCache(ctx context.Context, data config.SinkData) (err error) { + keyPrefix 
:= "sinker_key" + skey := fmt.Sprintf("%s-%s:%s", keyPrefix, data.OwnerID, data.SinkID) + bytes, err := json.Marshal(data) + if err != nil { + return err + } + if err = es.sinkerKeyRedisClient.Set(ctx, skey, bytes, 0).Err(); err != nil { + return err + } + return +} + +// GetActivity collector activity +func (es eventStore) GetActivity(sinkID string) (int64, error) { + if sinkID == "" { + return 0, errors.New("invalid parameters") + } + skey := fmt.Sprintf("%s:%s", activityPrefix, sinkID) + secs, err := es.sinkerKeyRedisClient.Get(context.Background(), skey).Result() + if err != nil { + return 0, err + } + lastActivity, _ := strconv.ParseInt(secs, 10, 64) + return lastActivity, nil +} + +func (es eventStore) RemoveSinkActivity(ctx context.Context, sinkId string) error { + skey := fmt.Sprintf("%s:%s", activityPrefix, sinkId) + cmd := es.sinkerKeyRedisClient.Del(ctx, skey, sinkId) + if err := cmd.Err(); err != nil { + es.logger.Error("error during redis reading of SinkId", zap.String("sink-id", sinkId), zap.Error(err)) + return err + } return nil } -func decodeSinksEvent(event map[string]interface{}, operation string) (sinksUpdateEvent, error) { - val := sinksUpdateEvent{ - sinkID: read(event, "sink_id", ""), - owner: read(event, "owner", ""), - timestamp: time.Now(), +func (es eventStore) PublishSinkStateChange(sink *sinkspb.SinkRes, status string, logsErr error, err error) { + streamID := "orb.sinker" + logMessage := "" + if logsErr != nil { + logMessage = logsErr.Error() + } + event := redis.SinkerUpdateEvent{ + SinkID: sink.Id, + Owner: sink.OwnerID, + State: status, + Msg: logMessage, + Timestamp: time.Now(), + } + + record := &redis2.XAddArgs{ + Stream: streamID, + Values: event.Encode(), + } + err = es.streamRedisClient.XAdd(context.Background(), record).Err() + if err != nil { + es.logger.Error("error sending event to event store", zap.Error(err)) + } + es.logger.Info("Maestro notified change of status for sink", zap.String("newState", status), zap.String("sink-id", sink.Id)) +} + +func decodeSinksEvent(event map[string]interface{}, operation string) (redis.SinksUpdateEvent, error) { + val := redis.SinksUpdateEvent{ + SinkID: read(event, "sink_id", ""), + Owner: read(event, "owner", ""), + Config: readMetadata(event, "config"), + Timestamp: time.Now(), } if operation != sinksDelete { var metadata types.Metadata if err := json.Unmarshal([]byte(read(event, "config", "")), &metadata); err != nil { - return sinksUpdateEvent{}, err + return redis.SinksUpdateEvent{}, err } - val.config = metadata + val.Config = metadata return val, nil } + return val, nil } diff --git a/maestro/redis/consumer/streams.go b/maestro/redis/consumer/streams.go index 9016b4356..d3a868cac 100644 --- a/maestro/redis/consumer/streams.go +++ b/maestro/redis/consumer/streams.go @@ -2,19 +2,24 @@ package consumer import ( "context" + "time" + + "github.com/ns1labs/orb/maestro/config" + "github.com/ns1labs/orb/pkg/errors" + "github.com/ns1labs/orb/maestro/kubecontrol" + maestroredis "github.com/ns1labs/orb/maestro/redis" "github.com/ns1labs/orb/pkg/types" sinkspb "github.com/ns1labs/orb/sinks/pb" - "time" "github.com/go-redis/redis/v8" "go.uber.org/zap" ) const ( - streamSinker = "orb.sinker" streamSinks = "orb.sinks" - group = "orb.collectors" + streamSinker = "orb.sinker" + groupMaestro = "orb.maestro" sinkerPrefix = "sinker." 
sinkerUpdate = sinkerPrefix + "update" @@ -30,125 +35,151 @@ const ( type Subscriber interface { CreateDeploymentEntry(ctx context.Context, sinkId, sinkUrl, sinkUsername, sinkPassword string) error GetDeploymentEntryFromSinkId(ctx context.Context, sinkId string) (string, error) - SubscribeSinks(context context.Context) error - SubscribeSinker(context context.Context) error + + UpdateSinkCache(ctx context.Context, data config.SinkData) (err error) + UpdateSinkStateCache(ctx context.Context, data config.SinkData) (err error) + PublishSinkStateChange(sink *sinkspb.SinkRes, status string, logsErr error, err error) + + GetActivity(sinkID string) (int64, error) + RemoveSinkActivity(ctx context.Context, sinkId string) error + + SubscribeSinksEvents(context context.Context) error + SubscribeSinkerEvents(context context.Context) error } type eventStore struct { - kafkaUrl string - kubecontrol kubecontrol.Service - sinksClient sinkspb.SinkServiceClient - client *redis.Client - esconsumer string - logger *zap.Logger + kafkaUrl string + kubecontrol kubecontrol.Service + sinksClient sinkspb.SinkServiceClient + streamRedisClient *redis.Client + sinkerKeyRedisClient *redis.Client + esconsumer string + logger *zap.Logger } -func NewEventStore(client *redis.Client, kafkaUrl string, kubecontrol kubecontrol.Service, esconsumer string, sinksClient sinkspb.SinkServiceClient, logger *zap.Logger) Subscriber { +func NewEventStore(streamRedisClient, sinkerKeyRedisClient *redis.Client, kafkaUrl string, kubecontrol kubecontrol.Service, esconsumer string, sinksClient sinkspb.SinkServiceClient, logger *zap.Logger) Subscriber { return eventStore{ - kafkaUrl: kafkaUrl, - kubecontrol: kubecontrol, - client: client, - sinksClient: sinksClient, - esconsumer: esconsumer, - logger: logger, + kafkaUrl: kafkaUrl, + kubecontrol: kubecontrol, + streamRedisClient: streamRedisClient, + sinkerKeyRedisClient: sinkerKeyRedisClient, + sinksClient: sinksClient, + esconsumer: esconsumer, + logger: logger, } } -func (es eventStore) SubscribeSinker(context context.Context) error { - //listening sinker events - err := es.client.XGroupCreateMkStream(context, streamSinker, group, "$").Err() +// SubscribeSinkerEvents Subscribe to listen events from sinker to maestro +func (es eventStore) SubscribeSinkerEvents(ctx context.Context) error { + err := es.streamRedisClient.XGroupCreateMkStream(ctx, streamSinker, groupMaestro, "$").Err() if err != nil && err.Error() != exists { return err } for { - streams, err := es.client.XReadGroup(context, &redis.XReadGroupArgs{ - Group: group, - Consumer: es.esconsumer, + streams, err := es.streamRedisClient.XReadGroup(ctx, &redis.XReadGroupArgs{ + Group: groupMaestro, + Consumer: "orb_maestro-es-consumer", Streams: []string{streamSinker, ">"}, Count: 100, }).Result() if err != nil || len(streams) == 0 { continue } - for _, msg := range streams[0].Messages { event := msg.Values - var err error - switch event["operation"] { - case sinkerUpdate: - rte := decodeSinkerStateUpdate(event) - if rte.state == "idle" { - err = es.handleSinkerDeleteCollector(context, rte) //sinker request delete collector - } else if rte.state == "active" { - err = es.handleSinkerCreateCollector(context, rte) //sinker request create collector + rte := decodeSinkerStateUpdate(event) + // here we should listen just event coming from sinker, not our own "publishState" events + if rte.State == "active" { + es.logger.Info("received message in sinker event bus", zap.Any("operation", event["operation"])) + switch event["operation"] { + case 
sinkerUpdate: + go func() { + err = es.handleSinkerCreateCollector(ctx, rte) //sinker request to create collector + if err != nil { + es.logger.Error("Failed to handle sinks event", zap.Any("operation", event["operation"]), zap.Error(err)) + } else { + es.streamRedisClient.XAck(ctx, streamSinker, groupMaestro, msg.ID) + } + }() + + case <-ctx.Done(): + return errors.New("stopped listening to sinks, due to context cancellation") } } - if err != nil { - es.logger.Error("Failed to handle sinker event", zap.String("operation", event["operation"].(string)), zap.Error(err)) - break - } - es.client.XAck(context, streamSinker, group, msg.ID) } } } -func (es eventStore) SubscribeSinks(context context.Context) error { - err := es.client.XGroupCreateMkStream(context, streamSinks, group, "$").Err() +// SubscribeSinksEvents Subscribe to listen events from sinks to maestro +func (es eventStore) SubscribeSinksEvents(ctx context.Context) error { + //listening sinker events + err := es.streamRedisClient.XGroupCreateMkStream(ctx, streamSinks, groupMaestro, "$").Err() if err != nil && err.Error() != exists { - return nil + return err } + for { - streams, err := es.client.XReadGroup(context, &redis.XReadGroupArgs{ - Group: group, - Consumer: es.esconsumer, + streams, err := es.streamRedisClient.XReadGroup(ctx, &redis.XReadGroupArgs{ + Group: groupMaestro, + Consumer: "orb_maestro-es-consumer", Streams: []string{streamSinks, ">"}, Count: 100, }).Result() if err != nil || len(streams) == 0 { continue } - for _, msg := range streams[0].Messages { event := msg.Values - rte, err := decodeSinksEvent(event, event["operation"].(string)) if err != nil { - es.logger.Error("error decoding sinks event", zap.Any("operation", event["operation"]), zap.Any("sink_event", event), zap.Error(err)) + es.logger.Error("Failed to handle sinks event", zap.Any("operation", event["operation"]), zap.Error(err)) break } + es.logger.Info("received message in sinks event bus", zap.Any("operation", event["operation"])) switch event["operation"] { case sinksCreate: - if v, ok := rte.config["opentelemetry"]; ok && v.(string) == "enabled" { - err = es.handleSinksCreateCollector(context, rte) //should create collector - } - + go func() { + err = es.handleSinksCreateCollector(ctx, rte) //should create collector + if err != nil { + es.logger.Error("Failed to handle sinks event", zap.Any("operation", event["operation"]), zap.Error(err)) + } else { + es.streamRedisClient.XAck(ctx, streamSinks, groupMaestro, msg.ID) + } + }() case sinksUpdate: - if v, ok := rte.config["opentelemetry"]; ok && v.(string) == "enabled" { - err = es.handleSinksUpdateCollector(context, rte) //should create collector - } - + go func() { + err = es.handleSinksUpdateCollector(ctx, rte) //should create collector + if err != nil { + es.logger.Error("Failed to handle sinks event", zap.Any("operation", event["operation"]), zap.Error(err)) + } else { + es.streamRedisClient.XAck(ctx, streamSinks, groupMaestro, msg.ID) + } + }() case sinksDelete: - err = es.handleSinksDeleteCollector(context, rte) //should delete collector - + go func() { + err = es.handleSinksDeleteCollector(ctx, rte) //should delete collector + if err != nil { + es.logger.Error("Failed to handle sinks event", zap.Any("operation", event["operation"]), zap.Error(err)) + } else { + es.streamRedisClient.XAck(ctx, streamSinks, groupMaestro, msg.ID) + } + }() + case <-ctx.Done(): + return errors.New("stopped listening to sinks, due to context cancellation") } - if err != nil { - es.logger.Error("Failed to handle sinks 
event", zap.Any("operation", event["operation"]), zap.Error(err)) - break - } - es.client.XAck(context, streamSinks, group, msg.ID) } } } // handleSinkerDeleteCollector Delete collector -func (es eventStore) handleSinkerDeleteCollector(ctx context.Context, event sinkerUpdateEvent) error { - es.logger.Info("Received maestro DELETE event from sinker, sink state=" + event.state + ", , Sink ID=" + event.sinkID + ", Owner ID=" + event.ownerID) - deployment, err := es.GetDeploymentEntryFromSinkId(ctx, event.sinkID) +func (es eventStore) handleSinkerDeleteCollector(ctx context.Context, event maestroredis.SinkerUpdateEvent) error { + es.logger.Info("Received maestro DELETE event from sinker, sink state", zap.String("state", event.State), zap.String("sinkID", event.SinkID), zap.String("ownerID", event.Owner)) + deploymentEntry, err := es.GetDeploymentEntryFromSinkId(ctx, event.SinkID) if err != nil { return err } - err = es.kubecontrol.DeleteOtelCollector(ctx, event.sinkID, deployment) + err = es.kubecontrol.DeleteOtelCollector(ctx, event.Owner, event.SinkID, deploymentEntry) if err != nil { return err } @@ -156,26 +187,29 @@ func (es eventStore) handleSinkerDeleteCollector(ctx context.Context, event sink } // handleSinkerCreateCollector Create collector -func (es eventStore) handleSinkerCreateCollector(ctx context.Context, event sinkerUpdateEvent) error { - es.logger.Info("Received maestro CREATE event from sinker, sink state=" + event.state + ", Sink ID=" + event.sinkID + ", Owner ID=" + event.ownerID) - deploymentEntry, err := es.GetDeploymentEntryFromSinkId(ctx, event.sinkID) +func (es eventStore) handleSinkerCreateCollector(ctx context.Context, event maestroredis.SinkerUpdateEvent) error { + es.logger.Info("Received maestro CREATE event from sinker, sink state", zap.String("state", event.State), zap.String("sinkID", event.SinkID), zap.String("ownerID", event.Owner)) + deploymentEntry, err := es.GetDeploymentEntryFromSinkId(ctx, event.SinkID) if err != nil { + es.logger.Error("could not find deployment entry from sink-id", zap.String("sinkID", event.SinkID), zap.Error(err)) return err } - err = es.kubecontrol.CreateOtelCollector(ctx, event.sinkID, deploymentEntry) + err = es.kubecontrol.CreateOtelCollector(ctx, event.Owner, event.SinkID, deploymentEntry) if err != nil { + es.logger.Error("could not find deployment entry from sink-id", zap.String("sinkID", event.SinkID), zap.Error(err)) return err } return nil } -func decodeSinkerStateUpdate(event map[string]interface{}) sinkerUpdateEvent { - val := sinkerUpdateEvent{ - ownerID: read(event, "owner", ""), - sinkID: read(event, "sink_id", ""), - state: read(event, "state", ""), - timestamp: time.Time{}, +func decodeSinkerStateUpdate(event map[string]interface{}) maestroredis.SinkerUpdateEvent { + val := maestroredis.SinkerUpdateEvent{ + Owner: read(event, "owner", ""), + SinkID: read(event, "sink_id", ""), + State: read(event, "state", ""), + Timestamp: time.Time{}, } + return val } diff --git a/maestro/redis/events.go b/maestro/redis/events.go new file mode 100644 index 000000000..f5507e91c --- /dev/null +++ b/maestro/redis/events.go @@ -0,0 +1,49 @@ +// Copyright (c) Mainflux +// SPDX-License-Identifier: Apache-2.0 + +// Adapted for Orb project, modifications licensed under MPL v. 2.0: +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at https://mozilla.org/MPL/2.0/. 
*/ +package redis + +import ( + "github.com/ns1labs/orb/pkg/types" + "time" +) + +const ( + SinkerPrefix = "sinker." + SinkerUpdate = SinkerPrefix + "update" +) + +type SinksUpdateEvent struct { + SinkID string + Owner string + Config types.Metadata + Timestamp time.Time +} + +type SinkerUpdateEvent struct { + SinkID string + Owner string + State string + Msg string + Timestamp time.Time +} + +func (cse SinkerUpdateEvent) Encode() map[string]interface{} { + return map[string]interface{}{ + "sink_id": cse.SinkID, + "owner": cse.Owner, + "state": cse.State, + "msg": cse.Msg, + "timestamp": cse.Timestamp.Unix(), + "operation": SinkerUpdate, + } +} + +type DeploymentEvent struct { + SinkID string + DeploymentYaml string +} diff --git a/maestro/service.go b/maestro/service.go index bba29e8ef..69f3f027c 100644 --- a/maestro/service.go +++ b/maestro/service.go @@ -11,6 +11,8 @@ package maestro import ( "context" "encoding/json" + "github.com/ns1labs/orb/maestro/monitor" + "strings" "github.com/go-redis/redis/v8" maestroconfig "github.com/ns1labs/orb/maestro/config" @@ -27,25 +29,30 @@ type maestroService struct { serviceContext context.Context serviceCancelFunc context.CancelFunc - kubecontrol kubecontrol.Service - logger *zap.Logger - redisClient *redis.Client - sinksClient sinkspb.SinkServiceClient - esCfg config.EsConfig - eventStore rediscons1.Subscriber - kafkaUrl string + kubecontrol kubecontrol.Service + monitor monitor.Service + logger *zap.Logger + streamRedisClient *redis.Client + sinkerRedisClient *redis.Client + sinksClient sinkspb.SinkServiceClient + esCfg config.EsConfig + eventStore rediscons1.Subscriber + kafkaUrl string } -func NewMaestroService(logger *zap.Logger, redisClient *redis.Client, sinksGrpcClient sinkspb.SinkServiceClient, esCfg config.EsConfig, otelCfg config.OtelConfig) Service { +func NewMaestroService(logger *zap.Logger, streamRedisClient *redis.Client, sinkerRedisClient *redis.Client, sinksGrpcClient sinkspb.SinkServiceClient, esCfg config.EsConfig, otelCfg config.OtelConfig) Service { kubectr := kubecontrol.NewService(logger) - eventStore := rediscons1.NewEventStore(redisClient, otelCfg.KafkaUrl, kubectr, esCfg.Consumer, sinksGrpcClient, logger) + eventStore := rediscons1.NewEventStore(streamRedisClient, sinkerRedisClient, otelCfg.KafkaUrl, kubectr, esCfg.Consumer, sinksGrpcClient, logger) + monitorService := monitor.NewMonitorService(logger, &sinksGrpcClient, eventStore, &kubectr) return &maestroService{ - logger: logger, - redisClient: redisClient, - sinksClient: sinksGrpcClient, - kubecontrol: kubectr, - eventStore: eventStore, - kafkaUrl: otelCfg.KafkaUrl, + logger: logger, + streamRedisClient: streamRedisClient, + sinkerRedisClient: sinkerRedisClient, + sinksClient: sinksGrpcClient, + kubecontrol: kubectr, + monitor: monitorService, + eventStore: eventStore, + kafkaUrl: otelCfg.KafkaUrl, } } @@ -66,6 +73,12 @@ func (svc *maestroService) Start(ctx context.Context, cancelFunction context.Can return err } + pods, err := svc.monitor.GetRunningPods(ctx) + if err != nil { + loadCancelFunction() + return err + } + for _, sinkRes := range sinksRes.Sinks { sinkContext := context.WithValue(loadCtx, "sink-id", sinkRes.Id) var data maestroconfig.SinkData @@ -73,50 +86,75 @@ func (svc *maestroService) Start(ctx context.Context, cancelFunction context.Can svc.logger.Warn("failed to unmarshal sink, skipping", zap.String("sink-id", sinkRes.Id)) continue } - if val, _ := svc.eventStore.GetDeploymentEntryFromSinkId(ctx, sinkRes.Id); val != "" { svc.logger.Info("Skipping 
deploymentEntry because it is already created") - continue + } else { + err := svc.eventStore.CreateDeploymentEntry(sinkContext, sinkRes.Id, data.Url, data.User, data.Password) + if err != nil { + svc.logger.Warn("failed to create deploymentEntry for sink, skipping", zap.String("sink-id", sinkRes.Id)) + continue + } + err = svc.eventStore.UpdateSinkCache(ctx, data) + if err != nil { + svc.logger.Warn("failed to update cache for sink", zap.String("sink-id", sinkRes.Id)) + continue + } + svc.logger.Info("successfully created deploymentEntry for sink", zap.String("sink-id", sinkRes.Id), zap.String("state", sinkRes.State)) } - err := svc.eventStore.CreateDeploymentEntry(sinkContext, sinkRes.Id, data.Url, data.User, data.Password) - if err != nil { - svc.logger.Warn("failed to create deploymentEntry for sink, skipping", zap.String("sink-id", sinkRes.Id)) - continue + isDeployed := false + if len(pods) > 0 { + for _, pod := range pods { + if strings.Contains(pod, sinkRes.Id) { + isDeployed = true + break + } + } } - svc.logger.Info("successfully created deploymentEntry for sink", zap.String("sink-id", sinkRes.Id), zap.String("state", sinkRes.State)) - // if State is Active, deploy OtelCollector - if sinkRes.State == "1" || sinkRes.State == "active" { + if sinkRes.State == "active" && !isDeployed { deploymentEntry, err := svc.eventStore.GetDeploymentEntryFromSinkId(sinkContext, sinkRes.Id) if err != nil { - svc.logger.Warn("failed to fetch deploymentEntry for sink, skipping", zap.String("sink-id", sinkRes.Id)) + svc.logger.Warn("failed to fetch deploymentEntry for sink, skipping", zap.String("sink-id", sinkRes.Id), zap.Error(err)) continue } - err = svc.kubecontrol.CreateOtelCollector(sinkContext, sinkRes.Id, deploymentEntry) + err = svc.kubecontrol.CreateOtelCollector(sinkContext, sinkRes.OwnerID, sinkRes.Id, deploymentEntry) if err != nil { - svc.logger.Warn("failed to deploy OtelCollector for sink, skipping", zap.String("sink-id", sinkRes.Id)) + svc.logger.Warn("failed to deploy OtelCollector for sink, skipping", zap.String("sink-id", sinkRes.Id), zap.Error(err)) continue } svc.logger.Info("successfully created otel collector for sink", zap.String("sink-id", sinkRes.Id)) } } - go svc.subscribeToSinksES(ctx) - go svc.subscribeToSinkerES(ctx) + go svc.subscribeToSinksEvents(ctx) + go svc.subscribeToSinkerEvents(ctx) + + monitorCtx := context.WithValue(ctx, "routine", "monitor") + err = svc.monitor.Start(monitorCtx, cancelFunction) + if err != nil { + svc.logger.Error("error during monitor routine start", zap.Error(err)) + cancelFunction() + return err + } + return nil } -func (svc *maestroService) subscribeToSinkerES(ctx context.Context) { - if err := svc.eventStore.SubscribeSinker(ctx); err != nil { - svc.logger.Error("Bootstrap service failed to subscribe to event sourcing sinker", zap.Error(err)) +func (svc *maestroService) subscribeToSinksEvents(ctx context.Context) { + if err := svc.eventStore.SubscribeSinksEvents(ctx); err != nil { + svc.logger.Error("Bootstrap service failed to subscribe to event sourcing", zap.Error(err)) + return } - svc.logger.Info("Subscribed to Redis Event Store for sinker") + svc.logger.Info("finished reading sinks events") + ctx.Done() } -func (svc *maestroService) subscribeToSinksES(ctx context.Context) { - svc.logger.Info("Subscribed to Redis Event Store for sinks") - if err := svc.eventStore.SubscribeSinks(ctx); err != nil { - svc.logger.Error("Bootstrap service failed to subscribe to event sourcing sinks", zap.Error(err)) +func (svc *maestroService) 
subscribeToSinkerEvents(ctx context.Context) { + if err := svc.eventStore.SubscribeSinkerEvents(ctx); err != nil { + svc.logger.Error("Bootstrap service failed to subscribe to event sourcing", zap.Error(err)) + return } + svc.logger.Info("finished reading sinker events") + ctx.Done() } diff --git a/migrate/migration/m3_enable_otel_all_sinks.go b/migrate/migration/m3_enable_otel_all_sinks.go new file mode 100644 index 000000000..5bb8e58c1 --- /dev/null +++ b/migrate/migration/m3_enable_otel_all_sinks.go @@ -0,0 +1,113 @@ +package migration + +import ( + "context" + "github.com/ns1labs/orb/migrate/postgres" + "github.com/ns1labs/orb/pkg/db" + "github.com/ns1labs/orb/pkg/errors" + "github.com/ns1labs/orb/pkg/types" + "github.com/ns1labs/orb/sinks" + "go.uber.org/zap" +) + +type M3SinksOpenTelemetry struct { + logger *zap.Logger + dbSinks postgres.Database + pwdSvc sinks.PasswordService +} + +func NewM3SinksOpenTelemetry(log *zap.Logger, dbSinks postgres.Database) Plan { + return &M3SinksOpenTelemetry{logger: log, dbSinks: dbSinks} +} + +func (m M3SinksOpenTelemetry) Up() (err error) { + ctx := context.Background() + q := "SELECT Id, Metadata FROM sinks" + params := map[string]interface{}{} + rows, err := m.dbSinks.NamedQueryContext(ctx, q, params) + if err != nil { + return + } + for rows.Next() { + qSink := querySink{} + if err = rows.StructScan(&qSink); err != nil { + return err + } + sink := sinks.Sink{ + ID: qSink.Id, + Config: qSink.Metadata, + } + sink, err = m.addOpenTelemetryFlag(sink) + if err != nil { + m.logger.Error("failed to encrypt data for id", zap.String("id", qSink.Id), zap.Error(err)) + return err + } + params := map[string]interface{}{ + "id": sink.ID, + "metadata": db.Metadata(sink.Config), + } + updateQuery := "UPDATE sinks SET metadata = :metadata WHERE id = :id" + _, err := m.dbSinks.NamedQueryContext(ctx, updateQuery, params) + if err != nil { + m.logger.Error("failed to update data for id", zap.String("id", qSink.Id), zap.Error(err)) + return err + } + } + return nil +} + +func (m M3SinksOpenTelemetry) Down() (err error) { + ctx := context.Background() + q := "SELECT Id, Metadata FROM sinks" + params := map[string]interface{}{} + rows, err := m.dbSinks.NamedQueryContext(ctx, q, params) + if err != nil { + return + } + for rows.Next() { + qSink := querySink{} + if err = rows.StructScan(&qSink); err != nil { + return err + } + sink := sinks.Sink{ + ID: qSink.Id, + Config: qSink.Metadata, + } + sink, err = m.rollbackOpenTelemetryFlag(sink) + if err != nil { + if err.Error() != "skip" { + m.logger.Error("failed to encrypt data for id", zap.String("id", qSink.Id), zap.Error(err)) + return err + } + continue + } + params := map[string]interface{}{ + "id": sink.ID, + "metadata": db.Metadata(sink.Config), + } + updateQuery := "UPDATE sinks SET metadata = :metadata WHERE id = :id" + _, err := m.dbSinks.NamedQueryContext(ctx, updateQuery, params) + if err != nil { + m.logger.Error("failed to update data for id", zap.String("id", qSink.Id), zap.Error(err)) + return err + } + } + return nil +} + +func (m M3SinksOpenTelemetry) addOpenTelemetryFlag(sink sinks.Sink) (sinks.Sink, error) { + newMetadata := types.Metadata{ + "opentelemetry": "enabled", + "migrated": "m3", + } + sink.Config.Merge(newMetadata) + return sink, nil +} + +func (m M3SinksOpenTelemetry) rollbackOpenTelemetryFlag(sink sinks.Sink) (sinks.Sink, error) { + if _, ok := sink.Config["migrated"]; !ok { + return sinks.Sink{}, errors.New("skip") + } + sink.Config.RemoveKeys([]string{"opentelemetry", "migrated"}) + 
return sink, nil +} diff --git a/pkg/errors/errors.go b/pkg/errors/errors.go index 1dd926525..7d43dd283 100644 --- a/pkg/errors/errors.go +++ b/pkg/errors/errors.go @@ -8,7 +8,7 @@ package errors -// Error specifies an API that must be fullfiled by error type +// Error specifies an API that must be fulfilled by error type type Error interface { // Error implements the error interface. diff --git a/pkg/types/maps.go b/pkg/types/maps.go index 5d22287a1..4e0337162 100644 --- a/pkg/types/maps.go +++ b/pkg/types/maps.go @@ -14,7 +14,11 @@ type Tags map[string]string func (t *Tags) Merge(newTags map[string]string) { for k, v := range newTags { - (*t)[k] = v + if v == "" { + delete(*t, k) + } else { + (*t)[k] = v + } } } @@ -39,6 +43,24 @@ func (s *Metadata) RestrictKeys(predicate func(string) bool) { } } +func (s *Metadata) Merge(metadataToAdd Metadata) { + for k, v := range metadataToAdd { + if v == "" { + delete(*s, k) + } else { + (*s)[k] = v + } + } +} + +func (s *Metadata) RemoveKeys(keys []string) { + for _, key := range keys { + if _, ok := (*s)[key]; ok { + delete(*s, key) + } + } +} + func (s *Metadata) IsApplicable(filterFunc func(string, interface{}) bool) bool { for key, value := range *s { if filterFunc(key, value) { diff --git a/policies/postgres/policies.go b/policies/postgres/policies.go index 116b6bc89..2806d3429 100644 --- a/policies/postgres/policies.go +++ b/policies/postgres/policies.go @@ -832,10 +832,6 @@ func toDataset(dba dbDataset) policies.Dataset { Tags: types.Tags(dba.Tags), } - //var sinkIDs []string - //sinkIDs = dba.SinkIDs - //dataset.SinkIDs = &sinkIDs - return dataset } diff --git a/python-test/README.md b/python-test/README.md index b491cfdda..58be9f9bb 100644 --- a/python-test/README.md +++ b/python-test/README.md @@ -103,6 +103,18 @@ Then fill in the correct values: - Required if `use_orb_live_address_pattern` is false - URL of the Orb deployment mqtt. Obs: You MUST include the protocol and the port. + +## List scenarios to be performed + +You can easily check the scenarios that will be executed considering the chosen tag by executing dry-run. + +For example, run the command below to check scenarios belonging to the smoke test suite: +> behavex -t=@smoke --dry-run + +Run the command below to check scenarios belonging to the sanity test suite: +> behavex -t=@sanity --dry-run + + ## Run behave Simply run `behave`, optionally passing the feature file as follows: @@ -141,7 +153,14 @@ Running smoke tests: > behavex -t=@smoke --parallel-processes=8 --parallel-scheme=scenario -## Tip +## Test execution reports +[behavex](https://github.com/hrcorval/behavex) provides a friendly HTML test execution report that contains information related to test scenarios, execution status, execution evidence and metrics. A filters bar is also provided to filter scenarios by name, tag or status. 
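Stepping back to the pkg/types helpers touched earlier in this patch: Merge now treats an empty value as a deletion and RemoveKeys drops a list of keys, which is exactly what the M3 migration leans on to flip the opentelemetry flag on and off. A small standalone sketch of those semantics (the sink config values are illustrative only):

```go
package main

import (
	"fmt"

	"github.com/ns1labs/orb/pkg/types"
)

func main() {
	cfg := types.Metadata{"remote_host": "https://prometheus.example.com/api/v1/write"}

	// Up: merge in the flags the M3 migration adds.
	cfg.Merge(types.Metadata{"opentelemetry": "enabled", "migrated": "m3"})
	fmt.Println(cfg) // map[migrated:m3 opentelemetry:enabled remote_host:...]

	// Down: remove them again.
	cfg.RemoveKeys([]string{"opentelemetry", "migrated"})
	fmt.Println(cfg) // map[remote_host:...]

	// Tags.Merge deletes a key when the new value is empty,
	// so an update that clears a tag actually removes it.
	tags := types.Tags{"region": "eu", "env": "prod"}
	tags.Merge(map[string]string{"env": ""})
	fmt.Println(tags) // map[region:eu]
}
```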
+ +It should be available at the following path: + +/report.html + +## Clean your environment After running the tests, clean up your environment by running the command: diff --git a/python-test/docs/agent_groups/check_agent_groups_details.md b/python-test/docs/agent_groups/check_agent_groups_details.md deleted file mode 100644 index b546a51a7..000000000 --- a/python-test/docs/agent_groups/check_agent_groups_details.md +++ /dev/null @@ -1,17 +0,0 @@ -## Scenario: Check agent groups details -## Steps: -1 - Create an agent group - -- REST API Method: POST -- endpoint: /agent_groups -- header: {authorization:token} - -2 - Get an agent group - -- REST API Method: GET -- endpoint: /agent_groups/agent_group_id - -## Expected Result: -- Status code must be 200 and the group name, description, matches against and tags must be returned on response - - diff --git a/python-test/docs/agent_groups/check_if_is_possible_cancel_operations_with_no_change.md b/python-test/docs/agent_groups/check_if_is_possible_cancel_operations_with_no_change.md deleted file mode 100644 index 6b849fe37..000000000 --- a/python-test/docs/agent_groups/check_if_is_possible_cancel_operations_with_no_change.md +++ /dev/null @@ -1,17 +0,0 @@ -## Scenario: Check if is possible cancel operations with no change -## Steps: -1 - Create an agent group - -- REST API Method: POST -- endpoint: /agent_groups -- header: {authorization:token} - -2 - On agent groups' page (`orb.live/pages/fleet/groups`) click on edit button -3 - Change groups' name and click "next" -4 - Change groups' description and click "next" -4 - Change groups' tag and click "next" -5 - Click "back" until return to agent groups' page - -## Expected Result: -- No changes must have been applied to the agent group - diff --git a/python-test/docs/agent_groups/check_if_total_agent_groups_on_agent_groups'_page_is_correct.md b/python-test/docs/agent_groups/check_if_total_agent_groups_on_agent_groups'_page_is_correct.md deleted file mode 100644 index f9d308d47..000000000 --- a/python-test/docs/agent_groups/check_if_total_agent_groups_on_agent_groups'_page_is_correct.md +++ /dev/null @@ -1,20 +0,0 @@ -## Scenario: Check if total agent groups on agent groups' page is correct -## Steps: -1 - Create multiple agent groups - -- REST API Method: POST -- endpoint: /agent_groups -- header: {authorization:token} - -2 - Get all existing agent groups - -- REST API Method: GET -- endpoint: /agent_groups - -3 - On agent groups' page (`orb.live/pages/fleet/groups`) check the total number of agent groups at the end of the agent groups table - -4 - Count the number of existing agent groups - -## Expected Result: -- Total agent groups on API response, agent groups page and the real number must be the same - diff --git a/python-test/docs/agent_groups/create_agent_group_with_description.md b/python-test/docs/agent_groups/create_agent_group_with_description.md deleted file mode 100644 index 1cb2dd29c..000000000 --- a/python-test/docs/agent_groups/create_agent_group_with_description.md +++ /dev/null @@ -1,12 +0,0 @@ -## Scenario: Create agent group with description -## Steps: - -1 - Create an agent groups with description - -- REST API Method: POST -- endpoint: /agent_groups -- header: {authorization:token} - - -## Expected Result: -- Request must have status code 201 (created) and the agent group must be created diff --git a/python-test/docs/agent_groups/create_agent_group_with_duplicate_name.md b/python-test/docs/agent_groups/create_agent_group_with_duplicate_name.md deleted file mode 100644 index 
5ecdbb2e9..000000000 --- a/python-test/docs/agent_groups/create_agent_group_with_duplicate_name.md +++ /dev/null @@ -1,13 +0,0 @@ -## Scenario: Create agent group with duplicate name -## Steps: -1 - Create an agent group - -- REST API Method: POST -- endpoint: /agent_groups -- header: {authorization:token} - -2 - Create another agent group using the same agent group name - -## Expected Result: -- First request must have status code 201 (created) and one group must be created on orb -- Second request must fail with status code 409 (conflict) and no other group must be created (make sure that first group has not been modified) diff --git a/python-test/docs/agent_groups/create_agent_group_with_invalid_name_(regex).md b/python-test/docs/agent_groups/create_agent_group_with_invalid_name_(regex).md deleted file mode 100644 index 3febc8735..000000000 --- a/python-test/docs/agent_groups/create_agent_group_with_invalid_name_(regex).md +++ /dev/null @@ -1,15 +0,0 @@ -## Scenario: Create agent group with invalid name (regex) -## Steps: -1 - Create an agent group using an invalid regex to agent group name - -- REST API Method: POST -- endpoint: /agent_groups -- header: {authorization:token} -- example of invalid regex: - - * name starting with non-alphabetic characters - * name with just 1 letter - * space-separated composite name - -## Expected Result: -- Request must fail with status code 400 (bad request) and no group must be created diff --git a/python-test/docs/agent_groups/create_agent_group_with_multiple_tags.md b/python-test/docs/agent_groups/create_agent_group_with_multiple_tags.md deleted file mode 100644 index 19ebb3790..000000000 --- a/python-test/docs/agent_groups/create_agent_group_with_multiple_tags.md +++ /dev/null @@ -1,12 +0,0 @@ -## Scenario: Create agent group with multiple tags -## Steps: -1 - Create an agent group with more than one pair (key:value) of tags - -- REST API Method: POST -- endpoint: /agent_groups -- header: {authorization:token} - - -## Expected Result: -- Request must have status code 201 (created) and the agent group must be created -- Groups with multiple tags will only match with agents with the same multiple tags \ No newline at end of file diff --git a/python-test/docs/agent_groups/create_agent_group_with_one_tag.md b/python-test/docs/agent_groups/create_agent_group_with_one_tag.md deleted file mode 100644 index 969bb19b5..000000000 --- a/python-test/docs/agent_groups/create_agent_group_with_one_tag.md +++ /dev/null @@ -1,12 +0,0 @@ -## Scenario: Create agent group with one tag -## Steps: - -1 - Create an agent groups with one pair (key:value) of tags - -- REST API Method: POST -- endpoint: /agent_groups -- header: {authorization:token} - - -## Expected Result: -- Request must have status code 201 (created) and the agent group must be created \ No newline at end of file diff --git a/python-test/docs/agent_groups/create_agent_group_without_description.md b/python-test/docs/agent_groups/create_agent_group_without_description.md deleted file mode 100644 index ddfd9f40c..000000000 --- a/python-test/docs/agent_groups/create_agent_group_without_description.md +++ /dev/null @@ -1,12 +0,0 @@ -## Scenario: Create agent group without description -## Steps: - -1 - Create an agent groups with no description - -- REST API Method: POST -- endpoint: /agent_groups -- header: {authorization:token} - - -## Expected Result: -- Request must have status code 201 (created) and the agent group must be created diff --git 
a/python-test/docs/agent_groups/create_agent_group_without_tag.md b/python-test/docs/agent_groups/create_agent_group_without_tag.md deleted file mode 100644 index 1b9ded0ab..000000000 --- a/python-test/docs/agent_groups/create_agent_group_without_tag.md +++ /dev/null @@ -1,13 +0,0 @@ -## Scenario: Create agent group without tag - -## Steps: -1 - Create an agent with no pair (key:value) of tags - -- REST API Method: POST -- endpoint: /agent_groups -- header: {authorization:token} - - -## Expected Result: -- Request must fail with status code 400 (bad request) and the agent group must not be created - diff --git a/python-test/docs/agent_groups/edit_agent_group_description.md b/python-test/docs/agent_groups/edit_agent_group_description.md deleted file mode 100644 index 244d76c67..000000000 --- a/python-test/docs/agent_groups/edit_agent_group_description.md +++ /dev/null @@ -1,17 +0,0 @@ -## Scenario: Edit agent group description -## Steps: -1 - Create an agent group - -- REST API Method: POST -- endpoint: /agent_groups -- header: {authorization:token} - -2- Edit this group description - -- REST API Method: PUT -- endpoint: /agent_groups/group_id -- header: {authorization:token} - - -## Expected Result: -- Request must have status code 200 (ok) and changes must be applied diff --git a/python-test/docs/agent_groups/edit_agent_group_description_removing_description.md b/python-test/docs/agent_groups/edit_agent_group_description_removing_description.md deleted file mode 100644 index a6a149df5..000000000 --- a/python-test/docs/agent_groups/edit_agent_group_description_removing_description.md +++ /dev/null @@ -1,17 +0,0 @@ -## Scenario: Edit Agent Group description removing description -## Steps: -1 - Create an agent group - -- REST API Method: POST -- endpoint: /agent_groups -- header: {authorization:token} - -2- Edit this group description using None - -- REST API Method: PUT -- endpoint: /agent_groups/group_id -- header: {authorization:token} - - -## Expected Result: -- Request must have status code 200 (ok) and changes must be applied diff --git a/python-test/docs/agent_groups/edit_agent_group_name,_description_and_tags.md b/python-test/docs/agent_groups/edit_agent_group_name,_description_and_tags.md deleted file mode 100644 index 2bca63469..000000000 --- a/python-test/docs/agent_groups/edit_agent_group_name,_description_and_tags.md +++ /dev/null @@ -1,17 +0,0 @@ -## Scenario: Edit Agent Group name, description and tags -## Steps: -1 - Create an agent group - -- REST API Method: POST -- endpoint: /agent_groups -- header: {authorization:token} - -2- Edit this group name, description and tags - -- REST API Method: PUT -- endpoint: /agent_groups/group_id -- header: {authorization:token} - - -## Expected Result: -- Request must have status code 200 (ok) and changes must be applied diff --git a/python-test/docs/agent_groups/edit_agent_group_name.md b/python-test/docs/agent_groups/edit_agent_group_name.md deleted file mode 100644 index e5b1bbed7..000000000 --- a/python-test/docs/agent_groups/edit_agent_group_name.md +++ /dev/null @@ -1,17 +0,0 @@ -## Scenario: Edit agent group name -## Steps: -1 - Create an agent group - -- REST API Method: POST -- endpoint: /agent_groups -- header: {authorization:token} - -2- Edit this group name - -- REST API Method: PUT -- endpoint: /agent_groups/group_id -- header: {authorization:token} - - -## Expected Result: -- Request must have status code 200 (ok) and changes must be applied diff --git a/python-test/docs/agent_groups/edit_agent_group_name_removing_name.md 
b/python-test/docs/agent_groups/edit_agent_group_name_removing_name.md deleted file mode 100644 index b67eb0f71..000000000 --- a/python-test/docs/agent_groups/edit_agent_group_name_removing_name.md +++ /dev/null @@ -1,17 +0,0 @@ -## Scenario: Edit Agent Group name removing name -## Steps: -1 - Create an agent group - -- REST API Method: POST -- endpoint: /agent_groups -- header: {authorization:token} - -2- Edit this group name using None - -- REST API Method: PUT -- endpoint: /agent_groups/group_id -- header: {authorization:token} - - -## Expected Result: -- Request must have status code 400 (error) and changes must not be applied diff --git a/python-test/docs/agent_groups/edit_agent_group_removing_tags.md b/python-test/docs/agent_groups/edit_agent_group_removing_tags.md deleted file mode 100644 index d4e1d1d9b..000000000 --- a/python-test/docs/agent_groups/edit_agent_group_removing_tags.md +++ /dev/null @@ -1,17 +0,0 @@ -## Scenario: Edit Agent Group removing tags -## Steps: -1 - Create an agent group - -- REST API Method: POST -- endpoint: /agent_groups -- header: {authorization:token} - -2- Edit this group tags using None - -- REST API Method: PUT -- endpoint: /agent_groups/group_id -- header: {authorization:token} - - -## Expected Result: -- Request must have status code 400 (error) and changes must not be applied \ No newline at end of file diff --git a/python-test/docs/agent_groups/edit_agent_group_tag.md b/python-test/docs/agent_groups/edit_agent_group_tag.md deleted file mode 100644 index 5ccb0c71b..000000000 --- a/python-test/docs/agent_groups/edit_agent_group_tag.md +++ /dev/null @@ -1,18 +0,0 @@ -## Scenario: Edit agent group tag -## Steps: -1 - Create an agent group - -- REST API Method: POST -- endpoint: /agent_groups -- header: {authorization:token} - -2- Edit this group tag - -- REST API Method: PUT -- endpoint: /agent_groups/group_id -- header: {authorization:token} - - -## Expected Result: -- Request must have status code 200 (ok) and changes must be applied - diff --git a/python-test/docs/agent_groups/edit_agent_group_tags_to_subscribe_agent.md b/python-test/docs/agent_groups/edit_agent_group_tags_to_subscribe_agent.md deleted file mode 100644 index c40f98416..000000000 --- a/python-test/docs/agent_groups/edit_agent_group_tags_to_subscribe_agent.md +++ /dev/null @@ -1,13 +0,0 @@ -## Scenario: Edit Agent Group tags to subscribe agent - -Steps: -- -1. Provision an agent with tags -2. Create a group with different tags -3. Edit groups' tags changing the value to match with agent - - -Expected result: -- -- Agent heartbeat must show just one group matching -- Agent logs must show that agent is subscribed to the group \ No newline at end of file diff --git a/python-test/docs/agent_groups/edit_agent_group_tags_to_unsubscribe_agent.md b/python-test/docs/agent_groups/edit_agent_group_tags_to_unsubscribe_agent.md deleted file mode 100644 index c915b7636..000000000 --- a/python-test/docs/agent_groups/edit_agent_group_tags_to_unsubscribe_agent.md +++ /dev/null @@ -1,11 +0,0 @@ -## Scenario: Edit Agent Group tags to unsubscribe agent -Steps: -- -1. Provision an agent with tags -2. Create a group with same tags -3. 
Edit groups' tags changing the value - -Expected result: -- -- Agent heartbeat must show 0 group matching -- Agent logs must show that agent is unsubscribed to the group \ No newline at end of file diff --git a/python-test/docs/agent_groups/edit_an_agent_group_through_the_details_modal.md b/python-test/docs/agent_groups/edit_an_agent_group_through_the_details_modal.md deleted file mode 100644 index f416d8788..000000000 --- a/python-test/docs/agent_groups/edit_an_agent_group_through_the_details_modal.md +++ /dev/null @@ -1,13 +0,0 @@ -## Scenario: Edit an agent group through the details modal -## Steps: -1 - Create an agent group - -- REST API Method: POST -- endpoint: /agent_groups -- header: {authorization:token} - -2 - On agent groups' page (`orb.live/pages/fleet/groups`) click on details button -3 - Click on "edit" button - -## Expected Result: -- User should be redirected to this agent group's edit page and should be able to make changes diff --git a/python-test/docs/agent_groups/remove_agent_group_using_correct_name.md b/python-test/docs/agent_groups/remove_agent_group_using_correct_name.md deleted file mode 100644 index 7e84779b5..000000000 --- a/python-test/docs/agent_groups/remove_agent_group_using_correct_name.md +++ /dev/null @@ -1,16 +0,0 @@ -## Scenario: Remove agent group using correct name -## Steps: -1 - Create an agent group - -- REST API Method: POST -- endpoint: /agent_groups -- header: {authorization:token} - -2 - On agent groups' page (`orb.live/pages/fleet/groups`) click on remove button -3 - Insert the name of the group correctly on delete modal -4 - Confirm the operation by clicking on "I UNDERSTAND, DELETE THIS AGENT GROUP" button - -## Expected Result: -- Agent group must be deleted - - diff --git a/python-test/docs/agent_groups/remove_agent_group_using_incorrect_name.md b/python-test/docs/agent_groups/remove_agent_group_using_incorrect_name.md deleted file mode 100644 index 5d50d81e6..000000000 --- a/python-test/docs/agent_groups/remove_agent_group_using_incorrect_name.md +++ /dev/null @@ -1,14 +0,0 @@ -## Scenario: Remove agent group using incorrect name -## Steps: -1 - Create an agent group - -- REST API Method: POST -- endpoint: /agent_groups -- header: {authorization:token} - -2 - On agent groups' page (`orb.live/pages/fleet/groups`) click on remove button -3 - Insert the name of the group incorrectly on delete modal - -## Expected Result: -- "I UNDERSTAND, DELETE THIS AGENT GROUP" button must not be enabled -- After user close the deletion modal, agent group must not be deleted diff --git a/python-test/docs/agent_groups/test_agent_groups_filters.md b/python-test/docs/agent_groups/test_agent_groups_filters.md deleted file mode 100644 index a30335fb2..000000000 --- a/python-test/docs/agent_groups/test_agent_groups_filters.md +++ /dev/null @@ -1,22 +0,0 @@ -## Scenario: Test agent groups filters -## Steps: -1 - Create multiple agent groups - -- REST API Method: POST -- endpoint: /agent_groups -- header: {authorization:token} - -2 - On agent groups' page (`orb.live/pages/fleet/groups`) use the filter: - -* Name -* Description -* Agents -* Tags -* Search by - - -## Expected Result: - -- All filters must be working properly - - diff --git a/python-test/docs/agent_groups/visualize_matching_agents.md b/python-test/docs/agent_groups/visualize_matching_agents.md deleted file mode 100644 index 264d60c4a..000000000 --- a/python-test/docs/agent_groups/visualize_matching_agents.md +++ /dev/null @@ -1,10 +0,0 @@ -## Scenario: Visualize matching agents -## Steps: -1 - On 
agent groups' page (`orb.live/pages/fleet/groups`) click on the number with link o "agents" column - -## Expected Result: - -- Matching Agents modal must be displayed - - - If 0 agents matches: `No data to display` and 0 total - - If one or more agents matches: all matching agents and total number of matches ust be displayed \ No newline at end of file diff --git a/python-test/docs/agents/check_agent_details.md b/python-test/docs/agents/check_agent_details.md deleted file mode 100644 index ae080946e..000000000 --- a/python-test/docs/agents/check_agent_details.md +++ /dev/null @@ -1,18 +0,0 @@ -## Scenario: Check agent details -## Steps: -1 - Create an agent - -- REST API Method: POST -- endpoint: /agents -- header: {authorization:token} - -2 - Get an agent - -- REST API Method: GET -- endpoint: /agents/agent_id - -## Expected Result: -- Status code must be 200 and an agent name, channel id, ts_created, status and tags must be returned on response - * If an agent container was never provisioned, status must be `new` - * If an agent container is running, status must be `online` - * If an agent container is stopped/removed, status must be `offline` diff --git a/python-test/docs/agents/check_if_is_possible_cancel_operations_with_no_change.md b/python-test/docs/agents/check_if_is_possible_cancel_operations_with_no_change.md deleted file mode 100644 index df32ebb1c..000000000 --- a/python-test/docs/agents/check_if_is_possible_cancel_operations_with_no_change.md +++ /dev/null @@ -1,16 +0,0 @@ -## Scenario: Check if is possible cancel operations with no change -## Steps: -1 - Create an agent - -- REST API Method: POST -- endpoint: /agents -- header: {authorization:token} - -2 - On agents' page (`orb.live/pages/fleet/agents`) click on edit button -3 - Change agents' name and click "next" -4 - Change agent's tag and click "next" -5 - Click "back" until return to agents' page - -## Expected Result: -- No changes must have been applied to the agent - diff --git a/python-test/docs/agents/check_if_total_agent_on_agents'_page_is_correct.md b/python-test/docs/agents/check_if_total_agent_on_agents'_page_is_correct.md deleted file mode 100644 index 7cec046f2..000000000 --- a/python-test/docs/agents/check_if_total_agent_on_agents'_page_is_correct.md +++ /dev/null @@ -1,19 +0,0 @@ -## Scenario: Check if total agent on agents' page is correct -## Steps: -1 - Create multiple agents - -- REST API Method: POST -- endpoint: /agents -- header: {authorization:token} - -2 - Get all existing agents - -- REST API Method: GET -- endpoint: /agents - -3 - On agents' page (`orb.live/pages/fleet/agents`) check the total number of agents at the end of the agents table - -4 - Count the number of existing agents - -## Expected Result: -- Total agents on API response, agents page and the real number must be the same diff --git a/python-test/docs/agents/create_agent_with_duplicate_name.md b/python-test/docs/agents/create_agent_with_duplicate_name.md deleted file mode 100644 index d4ab6e30e..000000000 --- a/python-test/docs/agents/create_agent_with_duplicate_name.md +++ /dev/null @@ -1,13 +0,0 @@ -## Scenario: Create agent with duplicate name -## Steps: -1 - Create an agent - -- REST API Method: POST -- endpoint: /agents -- header: {authorization:token} - -2 - Create another agent using the same agent name - -## Expected Result: -- First request must have status code 201 (created) and one agent must be created on orb -- Second request must fail with status code 409 (conflict) and no other agent must be created (make sure that 
first agent has not been modified) diff --git a/python-test/docs/agents/create_agent_with_invalid_name_(regex).md b/python-test/docs/agents/create_agent_with_invalid_name_(regex).md deleted file mode 100644 index 46c6647b5..000000000 --- a/python-test/docs/agents/create_agent_with_invalid_name_(regex).md +++ /dev/null @@ -1,15 +0,0 @@ -## Scenario: Create agent with invalid name (regex) -## Steps: -1 - Create an agent using an invalid regex to agent name - -- REST API Method: POST -- endpoint: /agents -- header: {authorization:token} -- example of invalid regex: - - * name starting with non-alphabetic characters - * name with just 1 letter - * space-separated composite name - -## Expected Result: -- Request must fail with status code 400 (bad request) and no agent must be created diff --git a/python-test/docs/agents/create_agent_with_multiple_tags.md b/python-test/docs/agents/create_agent_with_multiple_tags.md deleted file mode 100644 index e635a4b4d..000000000 --- a/python-test/docs/agents/create_agent_with_multiple_tags.md +++ /dev/null @@ -1,12 +0,0 @@ -## Scenario: Create agent with multiple tags -## Steps: -1 - Create an agent with more than one pair (key:value) of tags - -- REST API Method: POST -- endpoint: /agents -- header: {authorization:token} - - -## Expected Result: -- Request must have status code 201 (created) and the agent must be created -- Agent with multiple tags will match each tag individually diff --git a/python-test/docs/agents/create_agent_with_one_tag.md b/python-test/docs/agents/create_agent_with_one_tag.md deleted file mode 100644 index 1d4c52c3b..000000000 --- a/python-test/docs/agents/create_agent_with_one_tag.md +++ /dev/null @@ -1,12 +0,0 @@ -## Scenario: Create agent with one tag -## Steps: - -1 - Create an agent with one pair (key:value) of tags - -- REST API Method: POST -- endpoint: /agents -- header: {authorization:token} - - -## Expected Result: -- Request must have status code 201 (created) and the agent must be created diff --git a/python-test/docs/agents/create_agent_without_tags.md b/python-test/docs/agents/create_agent_without_tags.md deleted file mode 100644 index fb43439fb..000000000 --- a/python-test/docs/agents/create_agent_without_tags.md +++ /dev/null @@ -1,13 +0,0 @@ -## Scenario: Create agent without tags - -## Steps: -1 - Create an agent with no pair (key:value) of tags - -- REST API Method: POST -- endpoint: /agents -- header: {authorization:token} - - -## Expected Result: -- Request must have status code 201 (created) and the agent must be created - diff --git a/python-test/docs/agents/edit_agent_name.md b/python-test/docs/agents/edit_agent_name.md deleted file mode 100644 index 8fb911f02..000000000 --- a/python-test/docs/agents/edit_agent_name.md +++ /dev/null @@ -1,18 +0,0 @@ -## Scenario: Edit agent name - -## Steps: -1 - Create an agent - -- REST API Method: POST -- endpoint: /agents -- header: {authorization:token} - -2- Edit this agent name - -- REST API Method: PUT -- endpoint: /agents/agent_id -- header: {authorization:token} - - -## Expected Result: -- Request must have status code 200 (ok) and changes must be applied diff --git a/python-test/docs/agents/edit_agent_name_and_tags.md b/python-test/docs/agents/edit_agent_name_and_tags.md deleted file mode 100644 index fe1b29a4a..000000000 --- a/python-test/docs/agents/edit_agent_name_and_tags.md +++ /dev/null @@ -1,17 +0,0 @@ -## Scenario: Edit agent name and tags -## Steps: -1 - Create an agent - -- REST API Method: POST -- endpoint: /agents -- header: {authorization:token} - -2- 
Edit this agent and tags - -- REST API Method: PUT -- endpoint: /agents/agent_id -- header: {authorization:token} - - -## Expected Result: -- Request must have status code 200 (ok) and changes must be applied \ No newline at end of file diff --git a/python-test/docs/agents/edit_agent_tag.md b/python-test/docs/agents/edit_agent_tag.md deleted file mode 100644 index 5d5a0049b..000000000 --- a/python-test/docs/agents/edit_agent_tag.md +++ /dev/null @@ -1,19 +0,0 @@ -## Scenario: Edit agent tag - -## Steps: -1 - Create an agent - -- REST API Method: POST -- endpoint: /agents -- header: {authorization:token} - -2- Edit this agent tag - -- REST API Method: PUT -- endpoint: /agents/agent_id -- header: {authorization:token} - - -## Expected Result: -- Request must have status code 200 (ok) and changes must be applied - diff --git a/python-test/docs/agents/edit_an_agent_through_the_details_modal.md b/python-test/docs/agents/edit_an_agent_through_the_details_modal.md deleted file mode 100644 index 71bf39677..000000000 --- a/python-test/docs/agents/edit_an_agent_through_the_details_modal.md +++ /dev/null @@ -1,14 +0,0 @@ -## Scenario: Edit an agent through the details modal -## Steps: -1 - Create an agent - -- REST API Method: POST -- endpoint: /agents -- header: {authorization:token} - -2 - On agents' page (`orb.live/pages/fleet/agents`) click on details button -3 - Click on "edit" button - -## Expected Result: -- User should be redirected to this agent's edit page and should be able to make changes - diff --git a/python-test/docs/agents/insert_tags_in_agents_created_without_tags.md b/python-test/docs/agents/insert_tags_in_agents_created_without_tags.md deleted file mode 100644 index 880755759..000000000 --- a/python-test/docs/agents/insert_tags_in_agents_created_without_tags.md +++ /dev/null @@ -1,17 +0,0 @@ -## Scenario: Insert tags in agents created without tags -## Steps: -1 - Create an agent with no pair (key:value) of tags - -- REST API Method: POST -- endpoint: /agents -- header: {authorization:token} - -2- Edit this agent and insert at least one pair of tag - -- REST API Method: PUT -- endpoint: /agents/agent_id -- header: {authorization:token} - -## Expected Result: - -- Request must have status code 200 and tags must be added to the agent \ No newline at end of file diff --git a/python-test/docs/agents/remove_agent_using_correct_name.md b/python-test/docs/agents/remove_agent_using_correct_name.md deleted file mode 100644 index 7e9c8b94c..000000000 --- a/python-test/docs/agents/remove_agent_using_correct_name.md +++ /dev/null @@ -1,14 +0,0 @@ -## Scenario: Remove agent using correct name -## Steps: -1 - Create an agent - -- REST API Method: POST -- endpoint: /agents -- header: {authorization:token} - -2 - On agents' page (`orb.live/pages/fleet/agents`) click on remove button -3 - Insert the name of the agent correctly on delete modal -4 - Confirm the operation by clicking on "I UNDERSTAND, DELETE THIS AGENT" button - -## Expected Result: -- Agent must be deleted diff --git a/python-test/docs/agents/remove_agent_using_incorrect_name.md b/python-test/docs/agents/remove_agent_using_incorrect_name.md deleted file mode 100644 index 33b78fb1f..000000000 --- a/python-test/docs/agents/remove_agent_using_incorrect_name.md +++ /dev/null @@ -1,15 +0,0 @@ -## Scenario: Remove agent using incorrect name -## Steps: -1 - Create an agent - -- REST API Method: POST -- endpoint: /agents -- header: {authorization:token} - -2 - On agents' page (`orb.live/pages/fleet/agents`) click on remove button -3 - Insert 
the name of the agent incorrectly on delete modal - -## Expected Result: -- "I UNDERSTAND, DELETE THIS AGENT" button must not be enabled -- After the user closes the deletion modal, the agent must not be deleted - diff --git a/python-test/docs/agents/run_two_orb_agents_on_different_ports.md b/python-test/docs/agents/run_two_orb_agents_on_different_ports.md deleted file mode 100644 index 2e72cd28b..000000000 --- a/python-test/docs/agents/run_two_orb_agents_on_different_ports.md +++ /dev/null @@ -1,8 +0,0 @@ -## Scenario: Run two orb agents on different ports - -## Steps: -1 - Provision an agent -2 - Provision another agent on a different port - - Use the environment variable `PKTVISOR_PCAP_IFACE_DEFAULT` to set the port -## Expected Result: -- Both containers must be running \ No newline at end of file diff --git a/python-test/docs/agents/run_two_orb_agents_on_the_same_port.md b/python-test/docs/agents/run_two_orb_agents_on_the_same_port.md deleted file mode 100644 index 3bfb1c7bb..000000000 --- a/python-test/docs/agents/run_two_orb_agents_on_the_same_port.md +++ /dev/null @@ -1,9 +0,0 @@ -## Scenario: Run two orb agents on the same port - -## Steps: -1 - Provision an agent -2 - Provision another agent on the same port - -## Expected Result: -- Second container must be exited -- The container logs should contain the message "agent startup error" \ No newline at end of file diff --git a/python-test/docs/agents/save_agent_without_tag.md b/python-test/docs/agents/save_agent_without_tag.md deleted file mode 100644 index 7f7fda807..000000000 --- a/python-test/docs/agents/save_agent_without_tag.md +++ /dev/null @@ -1,16 +0,0 @@ -## Scenario: Save agent without tag -## Steps: -1 - Create an agent with at least one pair (key:value) of tags - -- REST API Method: POST -- endpoint: /agents -- header: {authorization:token} - -2 - Edit this agent's tags and remove all pairs - -- REST API Method: PUT -- endpoint: /agents/agent_id -- header: {authorization:token} - -## Expected Result: -- Request must have status code 200 and all tags must be removed from the agent diff --git a/python-test/docs/agents/test_agent_filters.md b/python-test/docs/agents/test_agent_filters.md deleted file mode 100644 index 4e6ea363e..000000000 --- a/python-test/docs/agents/test_agent_filters.md +++ /dev/null @@ -1,19 +0,0 @@ -## Scenario: Test agent filters -## Steps: -1 - Create multiple agents - -- REST API Method: POST -- endpoint: /agents -- header: {authorization:token} - -2 - On agents' page (`orb.live/pages/fleet/agents`) use the filter: - - * Name - * Status - * Tags - * Search by - - -## Expected Result: - -- All filters must be working properly diff --git a/python-test/docs/datasets/check_datasets_details.md b/python-test/docs/datasets/check_datasets_details.md deleted file mode 100644 index 947d2d7ed..000000000 --- a/python-test/docs/datasets/check_datasets_details.md +++ /dev/null @@ -1,17 +0,0 @@ -## Scenario: Check datasets details -## Steps: -1 - Create a dataset - -- REST API Method: POST -- endpoint: /policies/dataset -- header: {authorization:token} - -2 - Get a dataset - -- REST API Method: GET -- endpoint: /policies/dataset/dataset_id - -## Expected Result: -- Status code must be 200 and the dataset name, validity, linked agent group, linked agent policy and linked sink must be returned in the response - - \ No newline at end of file diff --git a/python-test/docs/datasets/check_if_is_possible_cancel_operations_with_no_change.md b/python-test/docs/datasets/check_if_is_possible_cancel_operations_with_no_change.md deleted file mode
100644 index 1b15b2637..000000000 --- a/python-test/docs/datasets/check_if_is_possible_cancel_operations_with_no_change.md +++ /dev/null @@ -1,14 +0,0 @@ -## Scenario: Check if is possible cancel operations with no change -## Steps: -1 - Create a dataset - -- REST API Method: POST -- endpoint: /policies/dataset -- header: {authorization:token} - -2 - On datasets' page (`orb.live/pages/datasets/list`) click on edit button -3 - Change groups' name and click "next" -4 - Change the linked sink and click "next" - -## Expected Result: -- No changes must have been applied to the dataset \ No newline at end of file diff --git a/python-test/docs/datasets/check_if_total_datasets_on_datasets'_page_is_correct.md b/python-test/docs/datasets/check_if_total_datasets_on_datasets'_page_is_correct.md deleted file mode 100644 index c34b5d9e6..000000000 --- a/python-test/docs/datasets/check_if_total_datasets_on_datasets'_page_is_correct.md +++ /dev/null @@ -1,20 +0,0 @@ -## Scenario: Check if total datasets on datasets' page is correct -## Steps: -1 - Create multiple datasets - -- REST API Method: POST -- endpoint: /policies/dataset -- header: {authorization:token} - -2 - Get all existing datasets - -- REST API Method: GET -- endpoint: /policies/dataset - -3 - On datasets' page (`orb.live/pages/datasets/list`) check the total number of datasets at the end of the dataset table - -4 - Count the number of existing datasets - -## Expected Result: -- The total of datasets in the API response, on the datasets' page and the actual count must be the same - diff --git a/python-test/docs/datasets/create_dataset.md b/python-test/docs/datasets/create_dataset.md deleted file mode 100644 index 437e4acef..000000000 --- a/python-test/docs/datasets/create_dataset.md +++ /dev/null @@ -1,12 +0,0 @@ -## Scenario: Create dataset -## Steps: - -1 - Create a dataset with no description - -- REST API Method: POST -- endpoint: /policies/dataset -- header: {authorization:token} - - -## Expected Result: -- Request must have status code 201 (created) and the dataset must be created \ No newline at end of file diff --git a/python-test/docs/datasets/create_dataset_with_invalid_name_(regex).md b/python-test/docs/datasets/create_dataset_with_invalid_name_(regex).md deleted file mode 100644 index 0c1cd853e..000000000 --- a/python-test/docs/datasets/create_dataset_with_invalid_name_(regex).md +++ /dev/null @@ -1,15 +0,0 @@ -## Scenario: Create dataset with invalid name (regex) -## Steps: -1 - Create a dataset using an invalid regex for the dataset name - -- REST API Method: POST -- endpoint: /policies/dataset -- header: {authorization:token} -- examples of invalid names: - -* name starting with non-alphabetic characters -* name with just 1 letter -* space-separated composite name - -## Expected Result: -- Request must fail with status code 400 (bad request) and no dataset must be created \ No newline at end of file diff --git a/python-test/docs/datasets/edit_a_dataset_through_the_details_modal.md b/python-test/docs/datasets/edit_a_dataset_through_the_details_modal.md deleted file mode 100644 index c89db9d65..000000000 --- a/python-test/docs/datasets/edit_a_dataset_through_the_details_modal.md +++ /dev/null @@ -1,13 +0,0 @@ -## Scenario: Edit a dataset through the details modal -## Steps: -1 - Create a dataset - -- REST API Method: POST -- endpoint: /policies/dataset -- header: {authorization:token} - -2 - On datasets' page (`orb.live/pages/datasets/list`) click on details button -3 - Click on "edit" button - -## Expected Result: -- User should be redirected to this
dataset's edit page and should be able to make changes \ No newline at end of file diff --git a/python-test/docs/datasets/edit_dataset_name.md b/python-test/docs/datasets/edit_dataset_name.md deleted file mode 100644 index 33032682c..000000000 --- a/python-test/docs/datasets/edit_dataset_name.md +++ /dev/null @@ -1,17 +0,0 @@ -## Scenario: Edit dataset name -## Steps: -1 - Create a dataset - -- REST API Method: POST -- endpoint: /policies/dataset -- header: {authorization:token} - -2 - Edit this dataset name - -- REST API Method: PUT -- endpoint: /policies/dataset/dataset_id -- header: {authorization:token} - - -## Expected Result: -- Request must have status code 200 (ok) and changes must be applied \ No newline at end of file diff --git a/python-test/docs/datasets/edit_dataset_sink.md b/python-test/docs/datasets/edit_dataset_sink.md deleted file mode 100644 index 67701ff1c..000000000 --- a/python-test/docs/datasets/edit_dataset_sink.md +++ /dev/null @@ -1,17 +0,0 @@ -## Scenario: Edit dataset sink -## Steps: -1 - Create a dataset - -- REST API Method: POST -- endpoint: /policies/dataset -- header: {authorization:token} - -2 - Edit this dataset sink - -- REST API Method: PUT -- endpoint: /policies/dataset/dataset_id -- header: {authorization:token} - - -## Expected Result: -- Request must have status code 200 (ok) and changes must be applied \ No newline at end of file diff --git a/python-test/docs/datasets/remove_dataset_using_correct_name.md b/python-test/docs/datasets/remove_dataset_using_correct_name.md deleted file mode 100644 index 8687b0096..000000000 --- a/python-test/docs/datasets/remove_dataset_using_correct_name.md +++ /dev/null @@ -1,14 +0,0 @@ -## Scenario: Remove dataset using correct name -## Steps: -1 - Create a dataset - -- REST API Method: POST -- endpoint: /policies/dataset -- header: {authorization:token} - -2 - On datasets' page (`orb.live/pages/datasets/list`) click on remove button -3 - Insert the name of the dataset correctly on delete modal -4 - Confirm the operation by clicking on "I UNDERSTAND, DELETE THIS DATASET" button - -## Expected Result: -- Dataset must be deleted diff --git a/python-test/docs/datasets/remove_dataset_using_incorrect_name.md b/python-test/docs/datasets/remove_dataset_using_incorrect_name.md deleted file mode 100644 index 45fce22b4..000000000 --- a/python-test/docs/datasets/remove_dataset_using_incorrect_name.md +++ /dev/null @@ -1,14 +0,0 @@ -## Scenario: Remove dataset using incorrect name -## Steps: -1 - Create a dataset - -- REST API Method: POST -- endpoint: /policies/dataset -- header: {authorization:token} - -2 - On datasets' page (`orb.live/pages/datasets/list`) click on remove button -3 - Insert the name of the dataset incorrectly on delete modal - -## Expected Result: -- "I UNDERSTAND, DELETE THIS DATASET" button must not be enabled -- After the user closes the deletion modal, the dataset must not be deleted diff --git a/python-test/docs/datasets/test_datasets_filter.md b/python-test/docs/datasets/test_datasets_filter.md deleted file mode 100644 index d4ab2af87..000000000 --- a/python-test/docs/datasets/test_datasets_filter.md +++ /dev/null @@ -1,17 +0,0 @@ -## Scenario: Test datasets filter -## Steps: -1 - Create multiple datasets - -- REST API Method: POST -- endpoint: /policies/dataset -- header: {authorization:token} - -2 - On datasets' page (`orb.live/pages/datasets/list`) use the filter: - -* Name -* Search by - - -## Expected Result: - -- All filters must be working properly diff --git a/python-test/docs/development_guide.md
b/python-test/docs/development_guide.md deleted file mode 100644 index f87bc1d92..000000000 --- a/python-test/docs/development_guide.md +++ /dev/null @@ -1,214 +0,0 @@ -## **INTEGRATION** - -| Integration Scenario | Automated via API | Automated via UI | Smoke | Sanity | -|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:|:-----------------:|:----------------:|:-----:|:------:| -| [Check if sink is active while scraping metrics](integration/sink_active_while_scraping_metrics.md) | ✅ | | 👍 | 👍 | -| [Check if sink with invalid credentials becomes active](integration/sink_error_invalid_credentials.md) | ✅ | | 👍 | 👍 | -| [Check if after 30 minutes without data sink becomes idle](integration/sink_idle_30_minutes.md) | | | | | -| [Provision agent before group (check if agent subscribes to the group)](integration/provision_agent_before_group.md) | ✅ | | 👍 | 👍 | -| [Provision agent after group (check if agent subscribes to the group)](integration/provision_agent_after_group.md) | ✅ | | 👍 | 👍 | -| [Provision agent with tag matching existing group linked to a valid dataset](integration/multiple_agents_subscribed_to_a_group.md) | ✅ | | 👍 | 👍 | -| [Apply multiple simple policies to a group](integration/apply_multiple_policies.md) | ✅ | | 👍 | 👍 | -| [Apply multiple advanced policies to a group](integration/apply_multiple_policies.md) | ✅ | | 👍 | 👍 | -| [Apply multiple policies to a group and remove one policy](integration/remove_one_of_multiple_policies.md) | ✅ | | 👍 | 👍 | -| [Apply multiple policies to a group and remove all of them](integration/remove_all_policies.md) | | | | | -| [Apply multiple policies to a group and remove one dataset](integration/remove_one_of_multiple_datasets.md) | ✅ | | 👍 | 👍 | -| [Apply multiple policies to a group and remove all datasets](integration/remove_all_datasets.md) | | | | | -| [Apply the same policy twice to the agent](integration/apply_policy_twice.md) | ✅ | | 👍 | 👍 | -| [Delete sink linked to a dataset, create another one and edit dataset using new sink](integration/change_sink_on_dataset.md) | ✅ | | 👍 | 👍 | -| [Remove one of multiples datasets that apply the same policy to the agent](integration/remove_one_dataset_of_multiples_with_same_policy.md) | | | | | -| [Remove group (invalid dataset, agent logs)](integration/remove_group.md) | ✅ | | 👍 | 👍 | -| [Remove sink (invalid dataset, agent logs)](integration/remove_sink.md) | ✅ | | 👍 | 👍 | -| [Remove policy (invalid dataset, agent logs, heartbeat)](integration/remove_policy.md) | ✅ | | 👍 | 👍 | -| [Remove dataset (check agent logs, heartbeat)](integration/remove_dataset.md) | ✅ | | 👍 | 👍 | -| [Remove agent container (logs, agent groups matches)](integration/remove_agent_container.md) | ✅ | | 👍 | 👍 | -| [Remove agent container force (logs, agent groups matches)](integration/remove_agent_container_force.md) | ✅ | | | 👍 | -| [Remove agent (logs, agent groups matches)](integration/remove_agent.md) | ✅ | | 👍 | 👍 | -| [Subscribe an agent to multiple groups created before agent provisioning](integration/subscribe_an_agent_to_multiple_groups_created_before_agent_provisioning.md) | ✅ | | 👍 | 👍 | -| [Subscribe an agent to multiple groups created after agent provisioning](integration/subscribe_an_agent_to_multiple_groups_created_after_agent_provisioning.md) | ✅ | | 👍 | 👍 | -| [Agent subscription 
to group after editing orb agent's tags (editing before agent provision)](integration/agent_subscription_to_group_after_editing_agent's_tags.md) | ✅ | | 👍 | 👍 | -| [Agent subscription to group after editing orb agent's tags (editing after agent provision and after groups creation)](integration/agent_subscription_to_group_after_editing_agent's_tags.md) | ✅ | | 👍 | 👍 | -| [Agent subscription to group after editing orb agent's tags (editing after agent provision and before second group creation)](integration/agent_subscription_to_group_after_editing_agent's_tags.md) | ✅ | | 👍 | 👍 | -| [Agent subscription to group with policies after editing orb agent's tags](integration/agent_subscription_to_group_with_policies_after_editing_agent's_tags.md) | ✅ | | 👍 | 👍 | -| [Agent subscription to multiple groups with policies after editing orb agent's tags](integration/agent_subscription_to_group_with_policies_after_editing_agent's_tags.md) | ✅ | | 👍 | 👍 | -| [Edit agent name and apply policies to then](integration/edit_agent_name_and_apply_policies_to_then.md) | ✅ | | 👍 | 👍 | -| [Insert tags in agents created without tags and apply policies to group matching new tags.md](integration/insert_tags_in_agents_created_without_tags_and_apply_policies_to_group_matching_new_tags.md) | ✅ | | 👍 | 👍 | -| [Agent unsubscription to group with policies after editing agent group's tags (editing tags after agent provision)](integration/agent_unsubscription_to_group_with_policies_after_editing_agent_group's_tags_editing_tags_after_agent_provision.md) | ✅ | | 👍 | 👍 | -| [Agent subscription to group with policies after editing agent group's tags (editing tags after agent provision)](integration/agent_subscription_to_group_with_policies_after_editing_agent_group's_tags_editing_tags_after_agent_provision.md) | ✅ | | 👍 | 👍 | -| [Agent unsubscription to group with policies after editing agent group's tags (editing tags before agent provision)](integration/agent_unsubscription_to_group_with_policies_after_editing_agent_group's_tags_editing_tags_before_agent_provision.md) | ✅ | | 👍 | 👍 | -| [Agent subscription to group with policies after editing agent group's tags (editing tags before agent provision)](integration/agent_subscription_to_group_with_policies_after_editing_agent_group's_tags_editing_tags_before_agent_provision.md) | ✅ | | 👍 | 👍 | -| [Agent unsubscription to group with policies after editing orb agent's tags and agent group's tags (editing tags after agent provision)](integration/agent_unsubscription_to_group_with_policies_after_editing_agent's_tags_and_agent_group's_tags_editing_tags_after_agent_provision.md) | ✅ | | 👍 | 👍 | -| [Agent subscription to group with policies after editing orb agent's tags and agent group's tags (editing tags after agent provision)](integration/agent_subscription_to_group_with_policies_after_editing_agent's_tags_and_agent_group's_tags_editing_tags_after_agent_provision.md) | ✅ | | 👍 | 👍 | -| [Agent unsubscription to group with policies after editing orb agent's tags and agent group's tags (editing tags before agent provision)](integration/agent_unsubscription_to_group_with_policies_after_editing_agent's_tags_and_agent_group's_tags_editing_tags_before_agent_provision.md) | ✅ | | 👍 | 👍 | -| [Agent subscription to group with policies after editing orb agent's tags and agent group's tags (editing tags before agent provision)](integration/agent_subscription_to_group_with_policies_after_editing_agent's_tags_and_agent_group's_tags_editing_tags_before_agent_provision.md) | ✅ | | 👍 | 👍 | -| 
[Remotely reset an agent with policies](integration/reset_agent_remotely.md) | ✅ | ✅ | 👍 | 👍 | -| [Remotely reset an agent without policies](integration/reset_agent_remotely.md) | ✅ | ✅ | 👍 | 👍 | ---------------------------------- -## **LOGIN** - - -| Login Scenario | Automated via API | Automated via UI | Smoke | Sanity | -|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:|:-----------------:|:----------------:|:-----:|:------:| -| [Request registration of a registered account using registered password username and company](login/request_registration_of_a_registered_account_using_registered_password_username_and_company.md) | ✅ | | 👍 | 👍 | -| [Request registration of a registered account using registered password and username](login/request_registration_of_a_registered_account_using_registered_password_and_username.md) | ✅ | | 👍 | 👍 | -| [Request registration of a registered account using registered password and company](login/request_registration_of_a_registered_account_using_registered_password_and_company.md) | ✅ | | 👍 | 👍 | -| [Request registration of a registered account using registered password](login/request_registration_of_a_registered_account_using_registered_password.md) | ✅ | | 👍 | 👍 | -| [Request registration of a registered account using unregistered password username and company](login/request_registration_of_a_registered_account_using_unregistered_password_username_and_company.md) | ✅ | | 👍 | 👍 | -| [Request registration of a registered account using unregistered password and username](login/request_registration_of_a_registered_account_using_unregistered_password_and_username.md) | ✅ | | 👍 | 👍 | -| [Request registration of a registered account using unregistered password and company](login/request_registration_of_a_registered_account_using_unregistered_password_and_company.md) | ✅ | | 👍 | 👍 | -| [Request registration of a registered account using unregistered password](login/request_registration_of_a_registered_account_using_unregistered_password.md) | ✅ | | 👍 | 👍 | -| [Request registration of an unregistered account with valid password and invalid email](login/request_registration_of_an_unregistered_account_with_valid_password_and_invalid_email.md) | ✅ | | 👍 | 👍 | -| [Request registration of an unregistered account with valid password and valid email](login/request_registration_of_an_unregistered_account_with_valid_password_and_valid_email.md) | ✅ | | 👍 | 👍 | -| [Request registration of an unregistered account with invalid password and valid email](login/request_registration_of_an_unregistered_account_with_invalid_password_and_valid_email.md) | ✅ | | 👍 | 👍 | -| [Request registration of an unregistered account with invalid password and invalid email](login/request_registration_of_an_unregistered_account_with_invalid_password_and_invalid_email.md) | ✅ | | 👍 | 👍 | -| [Check if email and password are required fields](login/check_if_email_and_password_are_required_fields.md) | ✅ | | 👍 | 👍 | -| [Login with valid credentials](login/login_with_valid_credentials.md) | ✅ | ✅ | 👍 | 👍 | -| [Login with invalid credentials](login/login_with_invalid_credentials.md) | ✅ | | 👍 | 👍 | -| [Request password with registered email address](login/request_password_with_registered_email_address.md) | | | 👍 | 👍 | -| [Request password with unregistered email address](login/request_password_with_unregistered_email_address.md) | | | | 👍 | - 
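
Most API-level scenarios referenced in these tables follow the same request pattern: authenticate once, then call endpoints such as POST /agents or POST /policies/dataset with the `{authorization:token}` header. As a minimal sketch of that pattern only (not the actual python-test fixtures; the base URL, environment variable names and payload fields below are assumptions for illustration), a step like "Create an agent" could be exercised with the `requests` library:

```python
import os

import requests

# Assumptions for illustration only: the base URL, environment variables and
# payload fields may differ from the real python-test configuration.
ORB_BASE_URL = os.getenv("ORB_BASE_URL", "https://orb.live/api/v1")
ORB_TOKEN = os.getenv("ORB_TOKEN", "<token obtained at login>")


def create_agent(name: str, tags: dict) -> requests.Response:
    """POST /agents with the {authorization:token} header used by the scenarios."""
    return requests.post(
        f"{ORB_BASE_URL}/agents",
        json={"name": name, "orb_tags": tags},
        headers={"Authorization": ORB_TOKEN},
        timeout=30,
    )


response = create_agent("test_agent", {"test": "true"})
# Creation scenarios expect 201 (created); the edit scenarios expect 200 (ok).
assert response.status_code == 201, response.text
```
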
---------------------------------- -## **AGENTS** - - -| Agents Scenario | Automated via API | Automated via UI | Smoke | Sanity | -|:------------------------------------------------------------------------------------------------------------------------:|:-----------------:|:----------------:|:-----:|:------:| -| [Check if total agent on agents' page is correct](agents/check_if_total_agent_on_agents'_page_is_correct.md) | | | | | -| [Create agent without tags](agents/create_agent_without_tags.md) | ✅ | | | 👍 | -| [Create agent with one tag](agents/create_agent_with_one_tag.md) | ✅ | ✅ | 👍 | 👍 | -| [Create agent with multiple tags](agents/create_agent_with_multiple_tags.md) | ✅ | | | 👍 | -| [Create agent with invalid name (regex)](agents/create_agent_with_invalid_name_(regex).md) | | | | 👍 | -| [Create agent with duplicate name](agents/create_agent_with_duplicate_name.md) | | | | 👍 | -| [Test agent filters](agents/test_agent_filters.md) | | | | | -| [Check agent details](agents/check_agent_details.md) | | | | 👍 | -| [Edit an agent through the details modal](agents/edit_an_agent_through_the_details_modal.md) | | | | 👍 | -| [Edit agent name](agents/edit_agent_name.md) | ✅ | | 👍 | 👍 | -| [Edit agent tag](agents/edit_agent_tag.md) | ✅ | | 👍 | 👍 | -| [Edit agent name and tag](agents/edit_agent_name_and_tags.md) | ✅ | | | 👍 | -| [Save agent without tag](agents/save_agent_without_tag.md) | ✅ | | | 👍 | -| [Insert tags in agents created without tags](agents/insert_tags_in_agents_created_without_tags.md) | ✅ | | 👍 | 👍 | -| [Check if is possible cancel operations with no change](agents/check_if_is_possible_cancel_operations_with_no_change.md) | | | | | -| [Remove agent using correct name](agents/remove_agent_using_correct_name.md) | ✅ | | 👍 | 👍 | -| [Remove agent using incorrect name](agents/remove_agent_using_incorrect_name.md) | | | | 👍 | -| [Run two orb agents on the same port](agents/run_two_orb_agents_on_the_same_port.md) | ✅ | ✅ | 👍 | 👍 | -| [Run two orb agents on different ports](agents/run_two_orb_agents_on_different_ports.md) | ✅ | ✅ | 👍 | 👍 | - ---------------------------------- -## **AGENT GROUP** - - -| Agent Group Scenario | Automated via API | Automated via UI | Smoke | Sanity | -|:--------------------------------------------------------------------------------------------------------------------------------------------:|:-----------------:|:----------------:|:-----:|:------:| -| [Check if total agent groups on agent groups' page is correct](agent_groups/check_if_total_agent_groups_on_agent_groups'_page_is_correct.md) | | | | | -| [Create agent group with invalid name (regex)](agent_groups/create_agent_group_with_invalid_name_(regex).md) | | | | 👍 | -| [Create agent group with duplicate name](agent_groups/create_agent_group_with_duplicate_name.md) | | | | 👍 | -| [Create agent group with description](agent_groups/create_agent_group_with_description.md) | ✅ | | 👍 | 👍 | -| [Create agent group without description](agent_groups/create_agent_group_without_description.md) | ✅ | | | 👍 | -| [Create agent group without tag](agent_groups/create_agent_group_without_tag.md) | ✅ | | | 👍 | -| [Create agent group with one tag](agent_groups/create_agent_group_with_one_tag.md) | ✅ | | 👍 | 👍 | -| [Create agent group with multiple tags](agent_groups/create_agent_group_with_multiple_tags.md) | ✅ | | | 👍 | -| [Test agent groups filters](agent_groups/test_agent_groups_filters.md) | | | | | -| [Visualize matching agents](agent_groups/visualize_matching_agents.md) | ✅ | | | 👍 | -| [Check agent groups 
details](agent_groups/check_agent_groups_details.md) | | | | 👍 | -| [Edit an agent group through the details modal](agent_groups/edit_an_agent_group_through_the_details_modal.md) | | | | 👍 | -| [Check if is possible cancel operations with no change](agent_groups/check_if_is_possible_cancel_operations_with_no_change.md) | | | | | -| [Edit agent group name](agent_groups/edit_agent_group_name.md) | ✅ | | 👍 | 👍 | -| [Edit agent group description](agent_groups/edit_agent_group_description.md) | ✅ | | | 👍 | -| [Edit agent group tag](agent_groups/edit_agent_group_tag.md) | ✅ | | 👍 | 👍 | -| [Remove agent group using correct name](agent_groups/remove_agent_group_using_correct_name.md) | ✅ | | 👍 | 👍 | -| [Remove agent group using incorrect name](agent_groups/remove_agent_group_using_incorrect_name.md) | | | | 👍 | - | [Edit Agent Group name removing name](agent_groups/edit_agent_group_name_removing_name.md) | ✅ | | 👍 | 👍 | - | [Edit Agent Group description removing description](agent_groups/edit_agent_group_description_removing_description.md) | ✅ | | 👍 | 👍 | - | [Edit Agent Group description](agent_groups/edit_agent_group_description.md) | ✅ | | 👍 | 👍 | - | [Edit Agent Group tags to unsubscribe agent](agent_groups/edit_agent_group_tags_to_unsubscribe_agent.md) | ✅ | | 👍 | 👍 | - | [Edit Agent Group tags to unsubscribe agent](agent_groups/edit_agent_group_tags_to_unsubscribe_agent.md) | ✅ | | 👍 | 👍 | - | [Edit Agent Group removing tags](agent_groups/edit_agent_group_removing_tags.md) | ✅ | | 👍 | 👍 | - | [Edit Agent Group name, description and tags](agent_groups/edit_agent_group_name,_description_and_tags.md) | ✅ | | 👍 | 👍 | - ---------------------------------- -## **SINK** - - -| Sink Scenario | Automated via API | Automated via UI | Smoke | Sanity | -|:---------------------------------------------------------------------------------------------------------------------------------------------------------------:|:-----------------:|:----------------:|:-----:|:------:| -| [Check if total sinks on sinks' page is correct](sinks/check_if_total_sinks_on_sinks'_page_is_correct.md) | | | | | -| [Create sink with invalid name (regex)](sinks/create_sink_with_invalid_name_(regex).md) | | | | 👍 | -| [Create sink with duplicate name](sinks/create_sink_with_duplicate_name.md) | | | | 👍 | -| [Create sink with description](sinks/create_sink_with_description.md) | ✅ | | 👍 | 👍 | -| [Create sink without description](sinks/create_sink_without_description.md) | | | | 👍 | -| [Create sink without tags](sinks/create_sink_without_tags.md) | ✅ | | 👍 | 👍 | -| [Create sink with tags](sinks/create_sink_with_tags.md) | | | | | -| [Create sink with multiple tags](sinks/create_sink_with_multiple_tags.md) | | | | | -| [Check if remote host, username and password are required to create a sink](sinks/check_if_remote_host,_username_and_password_are_required_to_create_a_sink.md) | | | | 👍 | -| [Test sink filters](sinks/test_sink_filters.md) | | | | | -| [Check sink details](sinks/check_sink_details.md) | | | | 👍 | -| [Edit a sink through the details modal](sinks/edit_a_sink_through_the_details_modal.md) | | | | 👍 | -| [Edit sink name](sinks/edit_sink_name.md) | | | | 👍 | -| [Edit sink description](sinks/edit_sink_description.md) | | | | 👍 | -| [Edit sink remote host](sinks/edit_sink_remote_host.md) | | | | 👍 | -| [Edit sink username](sinks/edit_sink_username.md) | | | | 👍 | -| [Edit sink password](sinks/edit_sink_password.md) | | | | 👍 | -| [Edit sink tags](sinks/edit_sink_tags.md) | | | | 👍 | -| [Check if is possible cancel operations 
with no change](sinks/check_if_is_possible_cancel_operations_with_no_change.md) | | | | | -| [Remove sink using correct name](sinks/remove_sink_using_correct_name.md) | ✅ | | 👍 | 👍 | -| [Remove sink using incorrect name](sinks/remove_sink_using_incorrect_name.md) | | | | 👍 | - - ---------------------------------- -## **POLICIES** - - -| Policies Scenario | Automated via API | Automated via UI | Smoke | Sanity | -|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------:|:-----------------:|:----------------:|:-----:|:------:| -| [Check if total policies on policies' page is correct](policies/check_if_total_policies_on_policies'_page_is_correct.md) | | | | | -| [Create policy with invalid name (regex)](policies/create_policy_with_invalid_name_(regex).md) | | | | 👍 | -| [Create policy with no agent provisioned](policies/create_policy_with_no_agent_provisioned.md) | | | | 👍 | -| [Create policy with duplicate name](policies/create_policy_with_duplicate_name.md) | | | | 👍 | -| [Create policy with description](policies/create_policy_with_description.md) | ✅ | | 👍 | 👍 | -| [Create policy without description](policies/create_policy_without_description.md) | ✅ | | | 👍 | -| [Create a policy with dns handler, description, host specification, bpf filter, pcap source, only qname suffix and only rcode](policies/create_policy_with_dns_handler.md) | ✅ | | 👍 | 👍 | -| [Create a policy with dns handler, host specification, bpf filter, pcap source, only qname suffix and only rcode](policies/create_policy_with_dns_handler.md) | ✅ | | 👍 | 👍 | -| [Create a policy with dns handler, bpf filter, pcap source, only qname suffix and only rcode](policies/create_policy_with_dns_handler.md) | ✅ | | 👍 | 👍 | -| [Create a policy with dns handler, pcap source, only qname suffix and only rcode](policies/create_policy_with_dns_handler.md) | ✅ | | 👍 | 👍 | -| [Create a policy with dns handler, only qname suffix](policies/create_policy_with_dns_handler.md) | ✅ | | 👍 | 👍 | -| [Create a policy with net handler, description, host specification, bpf filter and pcap source](policies/create_policy_with_net_handler.md) | ✅ | | 👍 | 👍 | -| [Create a policy with dhcp handler, description, host specification, bpf filter and pcap source](policies/create_policy_with_dhcp_handler.md) | ✅ | | 👍 | 👍 | -| [Create policy with multiple handlers](policies/create_policy_with_multiple_handlers.md) | | | | 👍 | -| [Test policy filters](policies/test_policy_filters.md) | | | | -| [Check policies details](policies/check_policies_details.md) | | | | 👍 | -| [Edit a policy through the details modal](policies/edit_a_policy_through_the_details_modal.md) | | | | 👍 | -| [Edit policy name](policies/edit_policy_name.md) | ✅ | | 👍 | 👍 | -| [Edit policy host_specification](policies/edit_policy_host_specification.md) | ✅ | | 👍 | 👍 | -| [Edit policy bpf_filter_expression](policies/edit_policy_bpf_filter_expression.md) | ✅ | | 👍 | 👍 | -| [Edit policy pcap_source](policies/edit_policy_pcap_source.md) | ✅ | | 👍 | 👍 | -| [Edit policy only_qname_suffix](policies/edit_policy_only_qname_suffix.md) | ✅ | | 👍 | 👍 | -| [Edit policy only_rcode](policies/edit_policy_only_rcode.md) | ✅ | | 👍 | 👍 | -| [Edit policy description](policies/edit_policy_description.md) | ✅ | | | 👍 | -| [Edit policy handler](policies/edit_policy_handler.md) | ✅ | | 👍 | 👍 | -| [Check if is possible cancel operations with no 
change](policies/check_if_is_possible_cancel_operations_with_no_change.md) | | | | | -| [Remove policy using correct name](policies/remove_policy_using_correct_name.md) | ✅ | | 👍 | 👍 | -| [Remove policy using incorrect name](policies/remove_policy_using_incorrect_name.md) | | | | 👍 | -| [Create duplicated net policy without insert new name](policies/create_duplicated_net_policy_without_insert_new_name.md) | ✅ | | 👍 | 👍 | -| [Create duplicated dhcp policy without insert new name](policies/create_duplicated_dhcp_policy_without_insert_new_name.md) | ✅ | | 👍 | 👍 | -| [Create duplicated dns policy without insert new name](policies/create_duplicated_dns_policy_without_insert_new_name.md) | ✅ | | 👍 | 👍 | -| [Create 4 duplicated policy with new name](policies/create_4_duplicated_policy_with_new_name.md) | ✅ | | 👍 | 👍 | -| [Create 3 duplicated dns policy without insert new name and 1 with new name](policies/create_3_duplicated_dns_policy_without_insert_new_name_and_1_with_new_name.md) | ✅ | | 👍 | 👍 | - - ---------------------------------- -## **DATASETS** - - -| Datasets Scenario | Automated via API | Automated via UI | Smoke | Sanity | -|:--------------------------------------------------------------------------------------------------------------------------:|:-----------------:|:----------------:|:-----:|:------:| -| [Check if total datasets on datasets' page is correct](datasets/check_if_total_datasets_on_datasets'_page_is_correct.md) | | | | | -| [Create dataset with invalid name (regex)](datasets/create_dataset_with_invalid_name_(regex).md) | | | | 👍 | -| [Create dataset](datasets/create_dataset.md) | ✅ | | 👍 | 👍 | -| [Check datasets details](datasets/check_datasets_details.md) | | | | 👍 | -| [Check if is possible cancel operations with no change](datasets/check_if_is_possible_cancel_operations_with_no_change.md) | | | | | -| [Test datasets filter](datasets/test_datasets_filter.md) | | | | | -| [Edit a dataset through the details modal](datasets/edit_a_dataset_through_the_details_modal.md) | | | | | -| [Edit dataset name](datasets/edit_dataset_name.md) | | | | 👍 | -| [Edit dataset sink](datasets/edit_dataset_sink.md) | | | | 👍 | -| [Remove dataset using correct name](datasets/remove_dataset_using_correct_name.md) | ✅ | | 👍 | 👍 | -| [Remove dataset using incorrect name](datasets/remove_dataset_using_incorrect_name.md) | | | | 👍 | diff --git a/python-test/docs/img/ORB-logo-ring.png b/python-test/docs/img/ORB-logo-ring.png deleted file mode 100644 index 99a9f94e033552044cb2f938de70fe30e34ca948..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 10902 zcmY*<2Ut_f7H$$EBp{H0ARs~rO{s!NFOl9sIsz&P(tDGl1da6GdzapubUBC+kSbN0 zROu*)6a{=coO|zk4?aHjo;|Z>ty%wSGZU(%cMhvvr z<>TFee_(eVC0S7A5aTBBqHSfQ@=QYm#05OVKoBq$h=|Yy_yq;qf}oerAP@)e9|R)G z0sr5xIgtN;1)Jv({qOk+p(CB*S3`h^v8|qwhmnT5_)}*`K66WF3oE|ojxK}_Aj#+A zz@wv;hdJuGql1&X_;V@rWe;)Snb6FSMqPICu$Mv`X=tP5oZYNY!hCo5?x3YfQ7Dw8 zo8>cc9eKt73+B-rnAP-go(&-K_Zq#Kgq-?+Ee>3i1Lyc-?)RJj|c- zI=Qp{OXRH?|92)Q_y33m2*^+P z!Y{yghyQ=HfuWLwR&gyiTPt8@0)1%#$;+PqU)z7?k>n>#{{JxZucVi)fT+@>lKlT$ zHfd7&M@u9i(5+|{`TKg$!MmAS5j4G-y*Gl{E{OWX{i^A?(_hC1$*GH8^#j%Jp9tE7 zJ~2GPhmqijk^{@{&Ah&+s3<0ndL>$}7$tB$-Y7+WvU}!Um|+&#$_^yhib8llF!l$_ z4f>rAJ+99fB|>-Koe>|WzjXesb-r7 zw0^KAl6OO7VSCcSX2WFk$B#yv;jB^p`S1)33Ivgi0SiI+npmoxzsg%X?G2hfwV#}$ zxheWX)FMMIG~J2{gZor4KT=z;E+!Meo0c8mR}zHwxFK8~t(l$0wR<{KYo%oEx-Vzd zCE*<BL$KKy|Y1|^Ik_DW~7 z``yvhyJI(}?l?e={9|*kO-Y+mLd2mPS2jk_j@NXCf1d1Hi(3zOILvcN`+YEOvz$B2 z4Q>?RbDa16C4$jvx^OhIBbVLs>HpNJO}T0AJa_Wsn|5fMe8OXrxSn2qoObi#lqw|T 
z7UJ(X`7~a!xVBbu`0&L5YqFbGrtd!?(gD?x)%4b~xX97O$G8;%E!(%kB&3GB;A5G=4&QDa&6%#OCVvZf zFI(xUJg#i1QonRFVCzEGKcpH~72)%2(9CIe*d(3>Ybl}PJl;#^S{rc^ZI%94o5M%% zMHI|%9zwd-`dnZ=KlyFliZcE3=V+ZrntcvXF-t{R`n;88!|-UCq;rQ8rho7A;D2== z$-J{ijw2VFEGl1w>~hHbe8#FQe)@FPUF@BsL}+wKwYJUhKjF=3^$okE9LX}SR{o=G z-^$AusACV`m|on$*O#|lw{X)+a{mJ}-79sf+&Ni}Fnb3a0lt%LH?B)v`2$?jaM?=M z=yVxtE>ob1adpp>xf(YajJSSU;`Tm3$ud#0poAZ|KBVYqeQ;Ml2xO|Lc`Pu>-IQ9o@onP4jQlcnpPy$I5Reu_t&E%Fd+Mj@x1+Q@y}9kiX5r7?U4 z#LSQEoWX5p?ygO>D_z$eP5|rqW4+S`H8x|1aF4zJO-V1o#Qe3j9`ebGvgtiz`gh^H zFOMt3vzp5ms<{`e29`J0@yA=R^2$mZ)kc<>U(6T%iOLr8(AU2}}LOMN8H<_4zgB3pC{kytK{V>qI;C{am-eTg1<&h$m#(iN#c&d80^iF(< zm$8uJ3uLSGipe#{{;j8CKD6AG!7A7+my}(qdPCu|W1vbGuSh5Jlasq>Jct!|behj9 zWfp2c`!}AR$8BDT(Yy#N@GTQ0W&S-R-59H4De_oKb9SNnQ+B1;VkUYEB2p%c2YS%EipnA00)JZovWe&!zmIPT5I$Q z%Cmeba+Vz40v}ZOb(^#xXgrW7B!$H^_jOzTP`NQ%2)$r7zEQB2eO}2(xaFy$pebJ| I`{dRC0W8@wk^lez diff --git a/python-test/docs/index.md b/python-test/docs/index.md deleted file mode 100644 index acf39516e..000000000 --- a/python-test/docs/index.md +++ /dev/null @@ -1,187 +0,0 @@ -## Login Scenarios: - -- [Request registration of a registered account using registered password username and company](login/request_registration_of_a_registered_account_using_registered_password_username_and_company.md) -- [Request registration of a registered account using registered password and username](login/request_registration_of_a_registered_account_using_registered_password_and_username.md) -- [Request registration of a registered account using registered password and company](login/request_registration_of_a_registered_account_using_registered_password_and_company.md) -- [Request registration of a registered account using registered password](login/request_registration_of_a_registered_account_using_registered_password.md) -- [Request registration of a registered account using unregistered password username and company](login/request_registration_of_a_registered_account_using_unregistered_password_username_and_company.md) -- [Request registration of a registered account using unregistered password and username](login/request_registration_of_a_registered_account_using_unregistered_password_and_username.md) -- [Request registration of a registered account using unregistered password and company](login/request_registration_of_a_registered_account_using_unregistered_password_and_company.md) -- [Request registration of a registered account using unregistered password](login/request_registration_of_a_registered_account_using_unregistered_password.md) -- [Request registration of an unregistered account with valid password and invalid email](login/request_registration_of_an_unregistered_account_with_valid_password_and_invalid_email.md) -- [Request registration of an unregistered account with valid password and valid email](login/request_registration_of_an_unregistered_account_with_valid_password_and_valid_email.md) -- [Request registration of an unregistered account with invalid password and valid email](login/request_registration_of_an_unregistered_account_with_invalid_password_and_valid_email.md) -- [Request registration of an unregistered account with invalid password and invalid email](login/request_registration_of_an_unregistered_account_with_invalid_password_and_invalid_email.md) -- [Check if email and password are required fields](login/check_if_email_and_password_are_required_fields.md) -- [Login with valid credentials](login/login_with_valid_credentials.md) -- [Login with invalid credentials](login/login_with_invalid_credentials.md) -- 
[Request password with registered email address](login/request_password_with_registered_email_address.md) -- [Request password with unregistered email address](login/request_password_with_unregistered_email_address.md) - - -## Agents Scenarios: -- [Check if total agent on agents' page is correct](agents/check_if_total_agent_on_agents'_page_is_correct.md) -- [Create agent without tags](agents/create_agent_without_tags.md) -- [Create agent with one tag](agents/create_agent_with_one_tag.md) -- [Create agent with multiple tags](agents/create_agent_with_multiple_tags.md) -- [Create agent with invalid name (regex)](agents/create_agent_with_invalid_name_(regex).md) -- [Create agent with duplicate name](agents/create_agent_with_duplicate_name.md) -- [Test agent filters](agents/test_agent_filters.md) -- [Check agent details](agents/check_agent_details.md) -- [Edit an agent through the details modal](agents/edit_an_agent_through_the_details_modal.md) -- [Edit agent name](agents/edit_agent_name.md) -- [Edit agent tag](agents/edit_agent_tag.md) -- [Save agent without tag](agents/save_agent_without_tag.md) -- [Insert tags in agents created without tags](agents/insert_tags_in_agents_created_without_tags.md) -- [Check if is possible cancel operations with no change](agents/check_if_is_possible_cancel_operations_with_no_change.md) -- [Remove agent using correct name](agents/remove_agent_using_correct_name.md) -- [Remove agent using incorrect name](agents/remove_agent_using_incorrect_name.md) -- [Edit agent name and tag](agents/edit_agent_name_and_tags.md) - - -## Agent Groups Scenarios: -- [Check if total agent groups on agent groups' page is correct](agent_groups/check_if_total_agent_groups_on_agent_groups'_page_is_correct.md) -- [Create agent group with invalid name (regex)](agent_groups/create_agent_group_with_invalid_name_(regex).md) -- [Create agent group with duplicate name](agent_groups/create_agent_group_with_duplicate_name.md) -- [Create agent group with description](agent_groups/create_agent_group_with_description.md) -- [Create agent group without description](agent_groups/create_agent_group_without_description.md) -- [Create agent group without tag](agent_groups/create_agent_group_without_tag.md) -- [Create agent group with one tag](agent_groups/create_agent_group_with_one_tag.md) -- [Create agent group with multiple tags](agent_groups/create_agent_group_with_multiple_tags.md) -- [Test agent groups filters](agent_groups/test_agent_groups_filters.md) -- [Visualize matching agents](agent_groups/visualize_matching_agents.md) -- [Check agent groups details](agent_groups/check_agent_groups_details.md) -- [Edit an agent group through the details modal](agent_groups/edit_an_agent_group_through_the_details_modal.md) -- [Check if is possible cancel operations with no change](agent_groups/check_if_is_possible_cancel_operations_with_no_change.md) -- [Remove agent group using correct name](agent_groups/remove_agent_group_using_correct_name.md) -- [Remove agent group using incorrect name](agent_groups/remove_agent_group_using_incorrect_name.md) -- [Run two orb agents on the same port](agents/run_two_orb_agents_on_the_same_port.md) -- [Run two orb agents on different ports](agents/run_two_orb_agents_on_different_ports.md) -- [Edit Agent Group name removing name](agent_groups/edit_agent_group_name_removing_name.md) -- [Edit agent group name](agent_groups/edit_agent_group_name.md) -- [Edit agent group description](agent_groups/edit_agent_group_description.md) -- [Edit Agent Group description removing 
description](agent_groups/edit_agent_group_description_removing_description.md) -- [Edit agent group tag](agent_groups/edit_agent_group_tag.md) -- [Edit Agent Group tags to subscribe agent](agent_groups/edit_agent_group_tags_to_subscribe_agent.md) -- [Edit Agent Group tags to unsubscribe agent](agent_groups/edit_agent_group_tags_to_unsubscribe_agent.md) -- [Edit Agent Group removing tags](agent_groups/edit_agent_group_removing_tags.md) -- [Edit Agent Group name, description and tags](agent_groups/edit_agent_group_name,_description_and_tags.md) - - -## Sink Scenarios: -- [Check if total sinks on sinks' page is correct](sinks/check_if_total_sinks_on_sinks'_page_is_correct.md) -- [Create sink with invalid name (regex)](sinks/create_sink_with_invalid_name_(regex).md) -- [Create sink with duplicate name](sinks/create_sink_with_duplicate_name.md) -- [Create sink with description](sinks/create_sink_with_description.md) -- [Create sink without description](sinks/create_sink_without_description.md) -- [Create sink without tags](sinks/create_sink_without_tags.md) -- [Create sink with tags](sinks/create_sink_with_tags.md) -- [Create sink with multiple tags](sinks/create_sink_with_multiple_tags.md) -- [Check if remote host, username and password are required to create a sink](sinks/check_if_remote_host,_username_and_password_are_required_to_create_a_sink.md) -- [Test sink filters](sinks/test_sink_filters.md) -- [Check sink details](sinks/check_sink_details.md) -- [Edit a sink through the details modal](sinks/edit_a_sink_through_the_details_modal.md) -- [Edit sink name](sinks/edit_sink_name.md) -- [Edit sink description](sinks/edit_sink_description.md) -- [Edit sink remote host](sinks/edit_sink_remote_host.md) -- [Edit sink username](sinks/edit_sink_username.md) -- [Edit sink password](sinks/edit_sink_password.md) -- [Edit sink tags](sinks/edit_sink_tags.md) -- [Check if is possible cancel operations with no change](sinks/check_if_is_possible_cancel_operations_with_no_change.md) -- [Remove sink using correct name](sinks/remove_sink_using_correct_name.md) -- [Remove sink using incorrect name](sinks/remove_sink_using_incorrect_name.md) - -## Policies Scenarios: -- [Check if total policies on policies' page is correct](policies/check_if_total_policies_on_policies'_page_is_correct.md) -- [Create policy with invalid name (regex)](policies/create_policy_with_invalid_name_(regex).md) -- [Create policy with no agent provisioned](policies/create_policy_with_no_agent_provisioned.md) -- [Create policy with duplicate name](policies/create_policy_with_duplicate_name.md) -- [Create policy with description](policies/create_policy_with_description.md) -- [Create policy without description](policies/create_policy_without_description.md) -- [Create policy with dhcp handler](policies/create_policy_with_dhcp_handler.md) -- [Create policy with dns handler](policies/create_policy_with_dns_handler.md) -- [Create policy with net handler](policies/create_policy_with_net_handler.md) -- [Create policy with multiple handlers](policies/create_policy_with_multiple_handlers.md) -- [Test policy filters](policies/test_policy_filters.md) -- [Check policies details](policies/check_policies_details.md) -- [Edit a policy through the details modal](policies/edit_a_policy_through_the_details_modal.md) -- [Edit policy name](policies/edit_policy_name.md) -- [Edit policy host_specification](policies/edit_policy_host_specification.md) -- [Edit policy bpf_filter_expression](policies/edit_policy_bpf_filter_expression.md) -- [Edit policy 
pcap_source](policies/edit_policy_pcap_source.md) -- [Edit policy only_qname_suffix](policies/edit_policy_only_qname_suffix.md) -- [Edit policy only_rcode](policies/edit_policy_only_rcode.md) -- [Edit policy description](policies/edit_policy_description.md) -- [Edit policy handler](policies/edit_policy_handler.md) -- [Check if is possible cancel operations with no change](policies/check_if_is_possible_cancel_operations_with_no_change.md) -- [Remove policy using correct name](policies/remove_policy_using_correct_name.md) -- [Remove policy using incorrect name](policies/remove_policy_using_incorrect_name.md) -- [Create duplicated net policy without insert new name](policies/create_duplicated_net_policy_without_insert_new_name.md) -- [Create duplicated dhcp policy without insert new name](policies/create_duplicated_dhcp_policy_without_insert_new_name.md) -- [Create duplicated dns policy without insert new name](policies/create_duplicated_dns_policy_without_insert_new_name.md) -- [Create 4 duplicated policy with new name](policies/create_4_duplicated_policy_with_new_name.md) -- [Create 3 duplicated dns policy without insert new name and 1 with new name](policies/create_3_duplicated_dns_policy_without_insert_new_name_and_1_with_new_name.md) - - -## Datasets Scenarios: -- [Check if total datasets on datasets' page is correct](datasets/check_if_total_datasets_on_datasets'_page_is_correct.md) -- [Create dataset with invalid name (regex)](datasets/create_dataset_with_invalid_name_(regex).md) -- [Create dataset](datasets/create_dataset.md) -- [Check datasets details](datasets/check_datasets_details.md) -- [Check if is possible cancel operations with no change](datasets/check_if_is_possible_cancel_operations_with_no_change.md) -- [Test datasets filter](datasets/test_datasets_filter.md) -- [Edit a dataset through the details modal](datasets/edit_a_dataset_through_the_details_modal.md) -- [Edit dataset name](datasets/edit_dataset_name.md) -- [Edit dataset sink](datasets/edit_dataset_sink.md) -- [Remove dataset using correct name](datasets/remove_dataset_using_correct_name.md) -- [Remove dataset using incorrect name](datasets/remove_dataset_using_incorrect_name.md) - -## Integration Scenarios: -- [Check if sink is active while scraping metrics](integration/sink_active_while_scraping_metrics.md) -- [Check if sink with invalid credentials becomes active](integration/sink_error_invalid_credentials.md) -- [Check if after 30 minutes without data sink becomes idle](integration/sink_idle_30_minutes.md) -- [Provision agent before group (check if agent subscribes to the group)](integration/provision_agent_before_group.md) -- [Provision agent after group (check if agent subscribes to the group)](integration/provision_agent_after_group.md) -- [Provision agent with tag matching existing group linked to a valid dataset](integration/multiple_agents_subscribed_to_a_group.md) -- [Apply multiple policies to a group](integration/apply_multiple_policies.md) -- [Apply multiple policies to a group and remove one policy](integration/remove_one_of_multiple_policies.md) -- [Apply multiple policies to a group and remove all of them](integration/remove_all_policies.md) -- [Apply multiple policies to a group and remove one dataset](integration/remove_one_of_multiple_datasets.md) -- [Apply multiple policies to a group and remove all datasets](integration/remove_all_datasets.md) -- [Apply the same policy twice to the agent](integration/apply_policy_twice.md) -- [Delete sink linked to a dataset, create another one and edit dataset 
using new sink](integration/change_sink_on_dataset.md) -- [Remove one of multiples datasets that apply the same policy to the agent](integration/remove_one_dataset_of_multiples_with_same_policy.md) -- [Remove group (invalid dataset, agent logs)](integration/remove_group.md) -- [Remove sink (invalid dataset, agent logs)](integration/remove_sink.md) -- [Remove policy (invalid dataset, agent logs, heartbeat)](integration/remove_policy.md) -- [Remove dataset (check agent logs, heartbeat)](integration/remove_dataset.md) -- [Remove agent container (logs, agent groups matches)](integration/remove_agent_container.md) -- [Remove agent container force (logs, agent groups matches)](integration/remove_agent_container_force.md) -- [Remove agent (logs, agent groups matches)](integration/remove_agent.md) -- [Subscribe an agent to multiple groups created before agent provisioning](integration/subscribe_an_agent_to_multiple_groups_created_before_agent_provisioning.md) -- [Subscribe an agent to multiple groups created after agent provisioning](integration/subscribe_an_agent_to_multiple_groups_created_after_agent_provisioning.md) -- [Agent subscription to group after editing orb agent's tags](integration/agent_subscription_to_group_after_editing_agent's_tags.md) -- [Agent subscription to group with policies after editing orb agent's tags](integration/agent_subscription_to_group_with_policies_after_editing_agent's_tags.md) -- [Edit agent name and apply policies to then](integration/edit_agent_name_and_apply_policies_to_then.md) -- [Insert tags in agents created without tags and apply policies to group matching new tags.md](integration/insert_tags_in_agents_created_without_tags_and_apply_policies_to_group_matching_new_tags.md) -- [Agent unsubscription to group with policies after editing agent group's tags (editing tags after agent provision)](integration/agent_unsubscription_to_group_with_policies_after_editing_agent_group's_tags_editing_tags_after_agent_provision.md) -- [Agent subscription to group with policies after editing agent group's tags (editing tags after agent provision)](integration/agent_subscription_to_group_with_policies_after_editing_agent_group's_tags_editing_tags_after_agent_provision.md) -- [Agent unsubscription to group with policies after editing agent group's tags (editing tags before agent provision)](integration/agent_unsubscription_to_group_with_policies_after_editing_agent_group's_tags_editing_tags_before_agent_provision.md) -- [Agent subscription to group with policies after editing agent group's tags (editing tags before agent provision)](integration/agent_subscription_to_group_with_policies_after_editing_agent_group's_tags_editing_tags_before_agent_provision.md) -- [Agent unsubscription to group with policies after editing orb agent's tags and agent group's tags (editing tags after agent provision)](integration/agent_unsubscription_to_group_with_policies_after_editing_agent's_tags_and_agent_group's_tags_editing_tags_after_agent_provision.md) -- [Agent subscription to group with policies after editing orb agent's tags and agent group's tags (editing tags after agent provision)](integration/agent_subscription_to_group_with_policies_after_editing_agent's_tags_and_agent_group's_tags_editing_tags_after_agent_provision.md) -- [Agent unsubscription to group with policies after editing orb agent's tags and agent group's tags (editing tags before agent 
provision)](integration/agent_unsubscription_to_group_with_policies_after_editing_agent's_tags_and_agent_group's_tags_editing_tags_before_agent_provision.md) -- [Agent subscription to group with policies after editing orb agent's tags and agent group's tags (editing tags before agent provision)](integration/agent_subscription_to_group_with_policies_after_editing_agent's_tags_and_agent_group's_tags_editing_tags_before_agent_provision.md) -- [Remotely reset an agent with policies](integration/reset_agent_remotely.md) -- [Remotely reset an agent without policies](integration/reset_agent_remotely.md) - - -## Pktvisor Agent - -* Providing Orb-agent with sample commands -* Providing Orb-agent with configuration files -* Providing Orb-agent with advanced auto-provisioning setup -* Providing more than one Orb-agent with different ports -* Providing Orb-agent using mocking interface -* Providing a Orb-agent with a wrong interface -* Pull the latest orb-agent image, build and run the agent diff --git a/python-test/docs/integration/agent_subscription_to_group_after_editing_agent's_tags.md b/python-test/docs/integration/agent_subscription_to_group_after_editing_agent's_tags.md deleted file mode 100644 index 71487bd23..000000000 --- a/python-test/docs/integration/agent_subscription_to_group_after_editing_agent's_tags.md +++ /dev/null @@ -1,43 +0,0 @@ -## Scenario: Agent subscription to group after editing orb agent's tags (agent provisioned before group) -Steps: -- -1. Provision an agent with tags -2. Create a group with same tags as agent -3. Create another group with different tags -4. Edit agent orb tags to match with second group - -Expected result: -- -- Agent heartbeat must show just one group matching -- Agent logs must show that agent is unsubscribed from the first group -- Agent logs must show that agent is subscribed to the second group - - -## Scenario: Agent subscription to group after editing orb agent's tags (agent provisioned after group) -Steps: -- -1. Create a group with tags -2. Provision an agent with same tags -3. Create another group with different tags -4. Edit agent orb tags to match with second group - -Expected result: -- -- Agent heartbeat must show just one group matching -- Agent logs must show that agent is unsubscribed from the first group -- Agent logs must show that agent is subscribed to the second group - - -## Scenario: Agent subscription to group after editing orb agent's tags (agent provisioned after groups) -Steps: -- -1. Create a group with tags -2. Create another group with different tags -3. Provision an agent with same tags as first group -4. Edit agent orb tags to match with second group - -Expected result: -- -- Agent heartbeat must show just one group matching -- Agent logs must show that agent is unsubscribed from the first group -- Agent logs must show that agent is subscribed to the second group diff --git a/python-test/docs/integration/agent_subscription_to_group_with_policies_after_editing_agent's_tags.md b/python-test/docs/integration/agent_subscription_to_group_with_policies_after_editing_agent's_tags.md deleted file mode 100644 index 315030372..000000000 --- a/python-test/docs/integration/agent_subscription_to_group_with_policies_after_editing_agent's_tags.md +++ /dev/null @@ -1,40 +0,0 @@ -## Scenario: Agent subscription to group with policies after editing orb agent's tags -Steps: -- -1. Provision an agent with tags -2. Create a group with same tags as agent -3. Create a sink -4. Create 1 policy -5. 
Create a dataset linking the group, the sink and the policy -6. Create another group with different tags -7. Create another policy and apply to the group -8. Edit agent orb tags to match with second group - -Expected result: -- -- Agent heartbeat must show just one group matching -- Agent logs must show that agent is unsubscribed from the first group -- Agent logs must show that agent is subscribed to the second group -- The container logs contain the message "policy applied successfully" referred to the policy applied to the second group -- The container logs that were output after all policies have been applied contains the message "scraped metrics for policy" referred to each applied policy - - -## Scenario: Agent subscription to multiple groups with policies after editing orb agent's tags -Steps: -- -1. Provision an agent with tags -2. Create a group with same tags as agent -3. Create a sink -4. Create 1 policy -5. Create a dataset linking the group, the sink and the policy -6. Create another group with different tags -7. Create another policy and apply to the group -8. Edit agent orb tags to match with both groups - -Expected result: -- -- Agent heartbeat must show 2 group matching -- Agent logs must show that agent is unsubscribed from the first group -- Agent logs must show that agent is subscribed to the second group -- The container logs contain the message "policy applied successfully" referred to the policy applied to both groups -- The container logs that were output after all policies have been applied contains the message "scraped metrics for policy" referred to each applied policy diff --git a/python-test/docs/integration/agent_subscription_to_group_with_policies_after_editing_agent's_tags_and_agent_group's_tags_editing_tags_after_agent_provision.md b/python-test/docs/integration/agent_subscription_to_group_with_policies_after_editing_agent's_tags_and_agent_group's_tags_editing_tags_after_agent_provision.md deleted file mode 100644 index 13a90fbea..000000000 --- a/python-test/docs/integration/agent_subscription_to_group_with_policies_after_editing_agent's_tags_and_agent_group's_tags_editing_tags_after_agent_provision.md +++ /dev/null @@ -1,17 +0,0 @@ -## Scenario: Agent subscription to group with policies after editing orb agent's tags and agent group's tags (editing tags after agent provision) - -Steps: -- -1. Provision an agent with tags -2. Create a group with same tags as agent -3. Create a sink -4. Create 1 policy -5. Create a dataset linking the group, the sink and the policy -6. Edit groups' tags changing the value -7. 
Edit agent orb tags to match with new groups tags - -Expected result: -- -- Agent heartbeat must show just one group matching -- Agent logs must show that agent is unsubscribed from the group -- Agent logs must show that agent is resubscribed to the group diff --git a/python-test/docs/integration/agent_subscription_to_group_with_policies_after_editing_agent's_tags_and_agent_group's_tags_editing_tags_before_agent_provision.md b/python-test/docs/integration/agent_subscription_to_group_with_policies_after_editing_agent's_tags_and_agent_group's_tags_editing_tags_before_agent_provision.md deleted file mode 100644 index 2ce2b4f54..000000000 --- a/python-test/docs/integration/agent_subscription_to_group_with_policies_after_editing_agent's_tags_and_agent_group's_tags_editing_tags_before_agent_provision.md +++ /dev/null @@ -1,16 +0,0 @@ -## Scenario: Agent subscription to group with policies after editing orb agent's tags and agent group's tags (editing tags before agent provision) -Steps: -- -1. Create an agent with tags -2. Create a group with same tags as agent -3. Create a sink -4. Create 1 policy -5. Create a dataset linking the group, the sink and the policy -6. Edit groups' tags changing the value -7. Edit agent orb tags to match with new groups tags -8. Provision the agent - -Expected result: -- -- Agent heartbeat must show just one group matching -- Agent logs must show that agent is subscribed to the group diff --git a/python-test/docs/integration/agent_subscription_to_group_with_policies_after_editing_agent_group's_tags_editing_tags_after_agent_provision.md b/python-test/docs/integration/agent_subscription_to_group_with_policies_after_editing_agent_group's_tags_editing_tags_after_agent_provision.md deleted file mode 100644 index 2cb61a9cd..000000000 --- a/python-test/docs/integration/agent_subscription_to_group_with_policies_after_editing_agent_group's_tags_editing_tags_after_agent_provision.md +++ /dev/null @@ -1,15 +0,0 @@ -## Scenario: Agent subscription to group with policies after editing agent group's tags (editing tags after agent provision) -Steps: -- -1. Provision an agent with tags -2. Create a group with different tags -3. Create a sink -4. Create 1 policy -5. Create a dataset linking the group, the sink and the policy -6. Edit groups' tags changing the value to match with agent - - -Expected result: -- -- Agent heartbeat must show just one group matching -- Agent logs must show that agent is subscribed to the group \ No newline at end of file diff --git a/python-test/docs/integration/agent_subscription_to_group_with_policies_after_editing_agent_group's_tags_editing_tags_before_agent_provision.md b/python-test/docs/integration/agent_subscription_to_group_with_policies_after_editing_agent_group's_tags_editing_tags_before_agent_provision.md deleted file mode 100644 index dc753c51d..000000000 --- a/python-test/docs/integration/agent_subscription_to_group_with_policies_after_editing_agent_group's_tags_editing_tags_before_agent_provision.md +++ /dev/null @@ -1,16 +0,0 @@ -## Scenario: Agent subscription to group with policies after editing agent group's tags (editing tags before agent provision) -Steps: -- -1. Create an agent with tags -2. Create a group with different tags -3. Create a sink -4. Create 1 policy -5. Create a dataset linking the group, the sink and the policy -6. Edit groups' tags changing the value to match with agent -7. 
Provision the agent - - -Expected result: -- -- Agent heartbeat must show just one group matching -- Agent logs must show that agent is subscribed to the group \ No newline at end of file diff --git a/python-test/docs/integration/agent_unsubscription_to_group_with_policies_after_editing_agent's_tags_and_agent_group's_tags_editing_tags_after_agent_provision.md b/python-test/docs/integration/agent_unsubscription_to_group_with_policies_after_editing_agent's_tags_and_agent_group's_tags_editing_tags_after_agent_provision.md deleted file mode 100644 index febe7b6e2..000000000 --- a/python-test/docs/integration/agent_unsubscription_to_group_with_policies_after_editing_agent's_tags_and_agent_group's_tags_editing_tags_after_agent_provision.md +++ /dev/null @@ -1,16 +0,0 @@ -## Scenario: Agent unsubscription to group with policies after editing orb agent's tags and agent group's tags (editing tags after agent provision) -Steps: -- -1. Provision an agent with tags -2. Create a group with another tag -3. Create a sink -4. Create 1 policy -5. Create a dataset linking the group, the sink and the policy -6. Edit orb tags on agent using the same tag as the group -7. Edit groups' tags using a different one - - -Expected result: -- -- Agent heartbeat must show just one group matching -- Agent logs must show that agent is unsubscribed to the group diff --git a/python-test/docs/integration/agent_unsubscription_to_group_with_policies_after_editing_agent's_tags_and_agent_group's_tags_editing_tags_before_agent_provision.md b/python-test/docs/integration/agent_unsubscription_to_group_with_policies_after_editing_agent's_tags_and_agent_group's_tags_editing_tags_before_agent_provision.md deleted file mode 100644 index 7cebf43ea..000000000 --- a/python-test/docs/integration/agent_unsubscription_to_group_with_policies_after_editing_agent's_tags_and_agent_group's_tags_editing_tags_before_agent_provision.md +++ /dev/null @@ -1,16 +0,0 @@ -## Scenario: Agent unsubscription to group with policies after editing orb agent's tags and agent group's tags (editing tags before agent provision) -Steps: -- -1. Create an agent with tags -2. Create a group with another tag -3. Create a sink -4. Create 1 policy -5. Create a dataset linking the group, the sink and the policy -6. Edit orb tags on agent using the same tag as the group -7. Edit groups' tags using a different one -8. Provision the agent - -Expected result: -- -- Agent heartbeat must show just one group matching -- Agent logs must show that agent is unsubscribed to the group diff --git a/python-test/docs/integration/agent_unsubscription_to_group_with_policies_after_editing_agent_group's_tags_editing_tags_after_agent_provision.md b/python-test/docs/integration/agent_unsubscription_to_group_with_policies_after_editing_agent_group's_tags_editing_tags_after_agent_provision.md deleted file mode 100644 index d3cd831d4..000000000 --- a/python-test/docs/integration/agent_unsubscription_to_group_with_policies_after_editing_agent_group's_tags_editing_tags_after_agent_provision.md +++ /dev/null @@ -1,15 +0,0 @@ -## Scenario: Agent unsubscription to group with policies after editing agent group's tags (editing tags after agent provision) -Steps: -- -1. Provision an agent with tags -2. Create a group with same tags as the agent -3. Create a sink -4. Create 1 policy -5. Create a dataset linking the group, the sink and the policy -6. 
Edit groups' tags changing the value - - -Expected result: -- -- Agent heartbeat must show just one group matching -- Agent logs must show that agent is unsubscribed to the group \ No newline at end of file diff --git a/python-test/docs/integration/agent_unsubscription_to_group_with_policies_after_editing_agent_group's_tags_editing_tags_before_agent_provision.md b/python-test/docs/integration/agent_unsubscription_to_group_with_policies_after_editing_agent_group's_tags_editing_tags_before_agent_provision.md deleted file mode 100644 index 8eb71955d..000000000 --- a/python-test/docs/integration/agent_unsubscription_to_group_with_policies_after_editing_agent_group's_tags_editing_tags_before_agent_provision.md +++ /dev/null @@ -1,16 +0,0 @@ -## Scenario: Agent unsubscription to group with policies after editing agent group's tags (editing tags before agent provision) -Steps: -- -1. Create an agent with tags -2. Create a group with same tags as the agent -3. Create a sink -4. Create 1 policy -5. Create a dataset linking the group, the sink and the policy -6. Edit groups' tags changing the value -7. Provision the agent - - -Expected result: -- -- Agent heartbeat must show just one group matching -- Agent logs must show that agent is unsubscribed to the group \ No newline at end of file diff --git a/python-test/docs/integration/apply_multiple_policies.md b/python-test/docs/integration/apply_multiple_policies.md deleted file mode 100644 index b6f7299a7..000000000 --- a/python-test/docs/integration/apply_multiple_policies.md +++ /dev/null @@ -1,38 +0,0 @@ -## 1- Scenario: apply multiple advanced policies to agents subscribed to a group - -Steps: -- -1. Provision an agent with tags -2. Create a group with same tags as agent -3. Create a sink -4. Create multiple advanced policies (with filters, source pcap) -5. Create a dataset linking the group, the sink and one of the policies -6. Create another dataset linking the same group, sink and the other policy - -Expected result: -- -- All the policies must be applied to the agent (orb-agent API response) -- The container logs contain the message "policy applied successfully" referred to each policy -- The container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy -- Referred sink must have active state on response -- Datasets related to all existing policies have validity valid - - -## 2- Scenario: apply multiple simple policies to agents subscribed to a group - -Steps: -- -1. Provision an agent with tags -2. Create a group with same tags as agent -3. Create a sink -4. Create 2 policies -5. Create a dataset linking the group, the sink and one of the policies -6. 
Create another dataset linking the same group, sink and the other policy - -Expected result: -- -- All the policies must be applied to the agent (orb-agent API response) -- The container logs contain the message "policy applied successfully" referred to each policy -- The container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy -- Referred sink must have active state on response -- Datasets related to all existing policies have validity valid \ No newline at end of file diff --git a/python-test/docs/integration/apply_policy_twice.md b/python-test/docs/integration/apply_policy_twice.md deleted file mode 100644 index b3ce45972..000000000 --- a/python-test/docs/integration/apply_policy_twice.md +++ /dev/null @@ -1,18 +0,0 @@ -## Scenario: apply twice the same policy to agents subscribed to a group - -Steps: -- -1. Provision an agent with tags -2. Create a group with same tags as agent -3. Create a sink -4. Create 1 policy -5. Create a dataset linking the group, the sink and the policy -6. Create another dataset linking the same group and policy (sink can be the same or a different one) - -Expected result: -- -- The policy must be applied to the agent (orb-agent API response) and two datasets must be listed linked to the policy -- The container logs contain the message "policy applied successfully" referred to the policy -- The container logs contain the message "scraped metrics for policy" referred to the policy -- All sinks linked must have active state on response -- Both datasets have validity valid \ No newline at end of file diff --git a/python-test/docs/integration/change_sink_on_dataset.md b/python-test/docs/integration/change_sink_on_dataset.md deleted file mode 100644 index 13141eb0b..000000000 --- a/python-test/docs/integration/change_sink_on_dataset.md +++ /dev/null @@ -1,20 +0,0 @@ -## Scenario: edit sink on dataset - -Steps: -- -1. Provision an agent with tags -2. Create a group with same tags as agent -3. Create 2 sinks -4. Create 1 policy -5. Create a dataset linking the group, one of the sinks and the policy -6. Wait for scraping metrics for policy -7. Edit the dataset changing the sink - -Expected result: -- -- The policy must be applied to the agent (orb-agent API response) -- The container logs contain the message "policy applied successfully" referred to the policy -- The container logs contain the message "scraped metrics for policy" referred to the policy -- Datasets have validity valid -- First applied sink must stop to receive data after the edition -- Second applied sink must start to receive data after the edition \ No newline at end of file diff --git a/python-test/docs/integration/edit_agent_name_and_apply_policies_to_then.md b/python-test/docs/integration/edit_agent_name_and_apply_policies_to_then.md deleted file mode 100644 index dc485b6ac..000000000 --- a/python-test/docs/integration/edit_agent_name_and_apply_policies_to_then.md +++ /dev/null @@ -1,16 +0,0 @@ -## Scenario: Edit agent name and apply policies to then -Steps: -- -1. Provision an agent with tags -2. Create a group with same tags as agent -3. Create a sink -4. Create 1 policy -5. Create a dataset linking the group, the sink and the policy -8. 
Edit agent name - -Expected result: -- -- Agent heartbeat must show just one group matching -- Agent logs must show that agent is subscribed to the group -- The container logs contain the message "policy applied successfully" referred to the applied policy -- The container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy diff --git a/python-test/docs/integration/insert_tags_in_agents_created_without_tags_and_apply_policies_to_group_matching_new_tags.md b/python-test/docs/integration/insert_tags_in_agents_created_without_tags_and_apply_policies_to_group_matching_new_tags.md deleted file mode 100644 index 5a44f4905..000000000 --- a/python-test/docs/integration/insert_tags_in_agents_created_without_tags_and_apply_policies_to_group_matching_new_tags.md +++ /dev/null @@ -1,15 +0,0 @@ -## Scenario: Insert tags in agents created without tags and apply policies to group matching new tags -## Steps: -1. Provision an agent without tags -2. Create a group with tags -3. Create a sink -4. Create 1 policy -5. Create a dataset linking the group, the sink and the policy -6. Edit agent name and tags, using the same tag as the group - -Expected result: -- -- Agent heartbeat must show just one group matching -- Agent logs must show that agent is subscribed to the group -- The container logs contain the message "policy applied successfully" referred to the applied policy -- The container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy diff --git a/python-test/docs/integration/multiple_agents_subscribed_to_a_group.md b/python-test/docs/integration/multiple_agents_subscribed_to_a_group.md deleted file mode 100644 index c7522925c..000000000 --- a/python-test/docs/integration/multiple_agents_subscribed_to_a_group.md +++ /dev/null @@ -1,18 +0,0 @@ -## Scenario: multiple agents subscribed to a group - -Steps: -- -1. Provision an agent with tags -2. Provision another agent with same tags (use the env var `ORB_BACKENDS_PKTVISOR_API_PORT` to change the pktvisor port; see the sketch after this scenario) -3. Create a group with same tags as agents -4. Create a sink -5. Create 1 policy -6. Create a dataset linking the group, the sink and the policy - -Expected result: -- -- The policy must be applied to both agents (orb-agent API response) -- The container logs contain the message "policy applied successfully" referred to the policy -- The container logs contain the message "scraped metrics for policy" referred to the policy -- Referred sink must have active state on response -- Dataset must have validity valid \ No newline at end of file
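Step 2 of the scenario above is the only place in these docs that runs two agents side by side, which requires giving the second agent a different pktvisor API port. A minimal sketch of how that could be done with the Docker SDK for Python follows; the image name, container name, port value and every variable in `base_env` are illustrative assumptions — only `ORB_BACKENDS_PKTVISOR_API_PORT` itself comes from the scenario.

```python
# Sketch only: start a second orb-agent container on a non-default pktvisor API port.
# Image name, container name and the provisioning variables below are placeholders.
import docker

client = docker.from_env()

base_env = {
    # hypothetical provisioning values; replace with whatever your agent provisioning flow issues
    "ORB_CLOUD_ADDRESS": "orb.live",
    "ORB_CLOUD_MQTT_ID": "<agent_mqtt_id>",
    "ORB_CLOUD_MQTT_KEY": "<agent_mqtt_key>",
    "ORB_CLOUD_MQTT_CHANNEL_ID": "<agent_channel_id>",
}

second_agent = client.containers.run(
    "orbcommunity/orb-agent:latest",      # assumed image name
    name="orb-agent-2",
    detach=True,
    network_mode="host",
    environment={**base_env, "ORB_BACKENDS_PKTVISOR_API_PORT": "10854"},  # non-default port
)
print(second_agent.name, second_agent.status)
```

With host networking both pktvisor APIs are exposed directly on the host, which is exactly why the port has to differ between the two containers.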
diff --git a/python-test/docs/integration/provision_agent_after_group.md b/python-test/docs/integration/provision_agent_after_group.md deleted file mode 100644 index 281f1e494..000000000 --- a/python-test/docs/integration/provision_agent_after_group.md +++ /dev/null @@ -1,12 +0,0 @@ -## Scenario: provision an agent after creating an agent group - -Steps: -- -1. Create a group with tags -2. Provision an agent with same tags as group - -Expected result: -- -- The orb-agent container logs contain the message "completed RPC subscription to group" -- Group has one agent matching -- Agent status is online \ No newline at end of file diff --git a/python-test/docs/integration/provision_agent_before_group.md b/python-test/docs/integration/provision_agent_before_group.md deleted file mode 100644 index 27726d5d8..000000000 --- a/python-test/docs/integration/provision_agent_before_group.md +++ /dev/null @@ -1,12 +0,0 @@ -## Scenario: provision an agent before creating an agent group - -Steps: -- -1. Provision an agent with tags -2. Create a group with same tags as the agent - -Expected result: -- -- The orb-agent container logs contain the message "completed RPC subscription to group" -- Group has one agent matching -- Agent status is online \ No newline at end of file diff --git a/python-test/docs/integration/remove_agent.md b/python-test/docs/integration/remove_agent.md deleted file mode 100644 index 36e9223db..000000000 --- a/python-test/docs/integration/remove_agent.md +++ /dev/null @@ -1,15 +0,0 @@ -## Scenario: Remove agent - -Steps: -- -1. Provision an agent with tags -2. Create a group with same tags as agent -3. Create a sink -4. Create a policy -5. Create a dataset linking the group, the sink and the policy -6. Remove the agent from Orb - -Expected result: -- -- Orb-agent logs should not have any error -- Group must match 0 agents \ No newline at end of file diff --git a/python-test/docs/integration/remove_agent_container.md b/python-test/docs/integration/remove_agent_container.md deleted file mode 100644 index ac62b1aeb..000000000 --- a/python-test/docs/integration/remove_agent_container.md +++ /dev/null @@ -1,23 +0,0 @@ -## Scenario: Remove agent container - -Steps: -- -1. Provision an agent with tags -2. Create a group with same tags as agent -3. Create a sink -4. Create a policy -5. Create a dataset linking the group, the sink and the policy -6. Stop and remove the orb-agent container - -Expected result: -- -- The orb-agent container logs contain: -``` -{"level":"info","ts":"time","caller":"pktvisor/pktvisor.go:390","msg":"pktvisor stopping"} -{"level":"info","ts":"time","caller":"pktvisor/pktvisor.go:253","msg":"pktvisor stdout","log": "Shutting down"} -{"level":"info","ts":"time","caller":"pktvisor/pktvisor.go:253","msg":"pktvisor stdout","log": "policy [policy_name]": "stopping"} -{"level":"info","ts":"time","caller":"pktvisor/pktvisor.go:253","msg":"pktvisor stdout","log": "policy [policy_name]": "stopping input instance: "} -{"level":"info","ts":"time","caller":"pktvisor/pktvisor.go:253","msg":"pktvisor stdout","log": "policy [policy_name]": "stopping handler instance: "} -{"level":"info","ts":"time","caller":"pktvisor/pktvisor.go:253","msg":"pktvisor stdout","log": "exit with success"} -``` -- Logs should not have any error diff --git a/python-test/docs/integration/remove_agent_container_force.md b/python-test/docs/integration/remove_agent_container_force.md deleted file mode 100644 index 26daf78c0..000000000 --- a/python-test/docs/integration/remove_agent_container_force.md +++ /dev/null @@ -1,23 +0,0 @@ -## Scenario: Remove agent container without stopping it - -Steps: -- -1. Provision an agent with tags -2. Create a group with same tags as agent -3. Create a sink -4. Create a policy -5. Create a dataset linking the group, the sink and the policy -6.
Remove orb-agent container - -Expected result: -- -- The orb-agent container logs contain: -``` -{"level":"info","ts":"time","caller":"pktvisor/pktvisor.go:390","msg":"pktvisor stopping"} -{"l/pktvisor.go:253","msg":"pktvisor stdout","log": "Shutting down"} -{"level":"info","ts":"time","caller":"pktvisor/pktvisor.go:253","msg":"pktvisor stdout","log": "policy [policy_name]": "stopping"} -{"level":"info","ts":"time","caller":"pktvisor/pktvisor.go:253","msg":"pktvisor stdout","log": "policy [policy_name]": "stopping input instance: "} -{"level":"info","ts":"time","caller":"pktvisor/pktvisor.go:253","msg":"pktvisor stdout","log": "policy [policy_name]": "stopping handler instance: "} -{"level":"info","ts":"time","caller":"pktvisor/pktvisor.go:253","msg":"pktvisor stdout","log": "exit with success"} -``` -- Logs should not have any error \ No newline at end of file diff --git a/python-test/docs/integration/remove_all_datasets.md b/python-test/docs/integration/remove_all_datasets.md deleted file mode 100644 index 9fbbf387d..000000000 --- a/python-test/docs/integration/remove_all_datasets.md +++ /dev/null @@ -1,18 +0,0 @@ -## Scenario: remove all datasets from an agent - -Steps: -- -1. Provision an agent with tags -2. Create a group with same tags as agent -3. Create a sink -4. Create 2 policies -5. Create a dataset linking the group, the sink and one of the policies -6. Create another dataset linking the same group, sink and the other policy -7. Create third dataset applying one of the policies again -8. Remove all datasets - -Expected result: -- -- The agent's heartbeat shows that 0 policies are applied -- Container logs should inform that removed policy was stopped and removed -- Container logs that were output after removing dataset does not contain the message "scraped metrics for policy" referred to deleted policies anymore \ No newline at end of file diff --git a/python-test/docs/integration/remove_all_policies.md b/python-test/docs/integration/remove_all_policies.md deleted file mode 100644 index e090a5440..000000000 --- a/python-test/docs/integration/remove_all_policies.md +++ /dev/null @@ -1,18 +0,0 @@ -## Scenario: remove all policies from an agent - -Steps: -- -1. Provision an agent with tags -2. Create a group with same tags as agent -3. Create a sink -4. Create 2 policies -5. Create a dataset linking the group, the sink and one of the policies -6. Create another dataset linking the same group, sink and the other policy -7. Remove both policies - -Expected result: -- -- The agent's heartbeat shows that 0 policies are applied -- Container logs should inform that removed policy was stopped and removed -- Container logs that were output after removing policies does not contain the message "scraped metrics for policy" referred to deleted policies anymore -- Datasets became "invalid" \ No newline at end of file diff --git a/python-test/docs/integration/remove_dataset.md b/python-test/docs/integration/remove_dataset.md deleted file mode 100644 index 1f6b276c7..000000000 --- a/python-test/docs/integration/remove_dataset.md +++ /dev/null @@ -1,16 +0,0 @@ -## Scenario: remove dataset from an agent - -Steps: -- -1. Provision an agent with tags -2. Create a group with same tags as agent -3. Create a sink -4. Create 1 policy -5. Create a dataset linking the group, the sink and the policy -7. 
Remove the dataset - -Expected result: -- -- The agent's heartbeat shows that 0 policies are applied -- Container logs should inform that the removed policy was stopped and removed -- Container logs that were output after removing the dataset do not contain the message "scraped metrics for policy" referred to the deleted policy anymore \ No newline at end of file diff --git a/python-test/docs/integration/remove_group.md b/python-test/docs/integration/remove_group.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/python-test/docs/integration/remove_one_dataset_of_multiples_with_same_policy.md b/python-test/docs/integration/remove_one_dataset_of_multiples_with_same_policy.md deleted file mode 100644 index 94bc79e9e..000000000 --- a/python-test/docs/integration/remove_one_dataset_of_multiples_with_same_policy.md +++ /dev/null @@ -1,16 +0,0 @@ -## Scenario: remove one of multiple datasets with the same policy - -Steps: -- -1. Provision an agent with tags -2. Create a group with same tags as agent -3. Create a sink -4. Create 1 policy -5. Create a dataset linking the group, the sink and the policy -6. Create another dataset linking the same group and policy (sink can be the same or a different one) -7. Remove one of the datasets - -Expected result: -- -- The agent's heartbeat shows that 1 policy is applied -- The orb agent container logs that were output after removing the dataset contain the message "scraped metrics for policy" referred to the applied policy \ No newline at end of file diff --git a/python-test/docs/integration/remove_one_of_multiple_datasets.md b/python-test/docs/integration/remove_one_of_multiple_datasets.md deleted file mode 100644 index a899f579e..000000000 --- a/python-test/docs/integration/remove_one_of_multiple_datasets.md +++ /dev/null @@ -1,18 +0,0 @@ -## Scenario: remove one of multiple datasets - -Steps: -- -1. Provision an agent with tags -2. Create a group with same tags as agent -3. Create a sink -4. Create 2 policies -5. Create a dataset linking the group, the sink and one of the policies -6. Create another dataset linking the same group, sink and the other policy -7. Remove one of the datasets - -Expected result: -- -- The agent's heartbeat shows that 1 policy is applied -- The orb-agent container logs should inform that the removed policy was stopped and removed -- The orb-agent container logs that were output after removing the dataset contain the message "scraped metrics for policy" referred to the applied policy -- The orb-agent container logs that were output after removing the dataset do not contain the message "scraped metrics for policy" referred to the deleted policy anymore (see the sketch below) \ No newline at end of file
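The log checks in these removal scenarios all follow the same pattern: note the time of the removal, then look only at log lines produced after it. A minimal sketch with the Docker SDK for Python is below; the container name and the policy names are placeholders, not fixtures from the test suite.

```python
# Sketch only: after removing a dataset, check which policies still emit
# "scraped metrics for policy". Container and policy names are placeholders.
from datetime import datetime, timezone
import time
import docker

client = docker.from_env()
agent = client.containers.get("orb-agent")      # assumed container name

removal_time = datetime.now(timezone.utc)       # captured just before the dataset is removed
# ... remove the dataset via the API or UI here ...
time.sleep(60)                                  # give the agent time to settle

logs = agent.logs(since=removal_time).decode()
scraped = [line for line in logs.splitlines() if "scraped metrics for policy" in line]

assert any("kept_policy" in line for line in scraped)         # remaining policy keeps scraping
assert not any("removed_policy" in line for line in scraped)  # deleted policy no longer appears
```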
diff --git a/python-test/docs/integration/remove_one_of_multiple_policies.md b/python-test/docs/integration/remove_one_of_multiple_policies.md deleted file mode 100644 index 2cc1fd3f8..000000000 --- a/python-test/docs/integration/remove_one_of_multiple_policies.md +++ /dev/null @@ -1,20 +0,0 @@ -## Scenario: remove one of multiple policies from an agent - -Steps: -- -1. Provision an agent with tags -2. Create a group with same tags as agent -3. Create a sink -4. Create 2 policies -5. Create a dataset linking the group, the sink and one of the policies -6. Create another dataset linking the same group, sink and the other policy -7. Remove 1 policy - -Expected result: -- -- The agent's heartbeat shows that 1 policy is applied -- Container logs should inform that the removed policy was stopped and removed -- Container logs that were output after removing the policy do not contain the message "scraped metrics for policy" referred to the deleted policy anymore -- The orb agent container logs that were output after removing the policy contain the message "scraped metrics for policy" referred to the remaining policy -- The dataset referred to the removed policy becomes "invalid" -- The dataset referred to the remaining policy remains "valid" \ No newline at end of file diff --git a/python-test/docs/integration/remove_policy.md b/python-test/docs/integration/remove_policy.md deleted file mode 100644 index df75e29aa..000000000 --- a/python-test/docs/integration/remove_policy.md +++ /dev/null @@ -1,17 +0,0 @@ -## Scenario: remove policy from an agent - -Steps: -- -1. Provision an agent with tags -2. Create a group with same tags as agent -3. Create a sink -4. Create 1 policy -5. Create a dataset linking the group, the sink and the policy -6. Remove the policy - -Expected result: -- -- The agent's heartbeat shows that 0 policies are applied -- Container logs should inform that the removed policy was stopped and removed -- Container logs that were output after removing the policy do not contain the message "scraped metrics for policy" referred to the deleted policy anymore -- Datasets become "invalid" \ No newline at end of file diff --git a/python-test/docs/integration/remove_sink.md b/python-test/docs/integration/remove_sink.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/python-test/docs/integration/reset_agent_remotely.md b/python-test/docs/integration/reset_agent_remotely.md deleted file mode 100644 index fe29ee7d0..000000000 --- a/python-test/docs/integration/reset_agent_remotely.md +++ /dev/null @@ -1,39 +0,0 @@ -## Scenario: Reset agent remotely - -### Agent with policies applied: -## Steps: -- Provision an orb-agent and apply 2 policies to it -- Restart the agent through a POST request to the `/agents/{agent_id}/rpc/reset` endpoint (see the sketch after this scenario) -- Check the logs and agent's view page - - -## Expected Result: -- this agent's heartbeat shows that 2 policies are successfully applied and that its status is running -- the container logs should contain the message "restarting all backends" within 5 seconds -- the container logs that were output after resetting the agent contain the message "removing policies" within 5 seconds -- the container logs that were output after resetting the agent contain the message "resetting backend" within 5 seconds -- the container logs that were output after resetting the agent contain the message "pktvisor process stopped" within 5 seconds -- the container logs that were output after resetting the agent contain the message "reapplying policies" within 5 seconds -- the container logs that were output after resetting the agent contain the message "all backends were restarted" within 5 seconds -- the container logs that were output after resetting the agent contain the message "completed RPC subscription to group" within 10 seconds -- the container logs that were output after resetting the agent contain the message "policy applied successfully" referred to each applied policy within 10 seconds -- the container logs that were output after resetting the agent contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds -
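The restart step above is driven through the REST API rather than the UI, so it is worth showing what that call can look like. The sketch below assumes the base URL, the API path prefix and a previously obtained authorization token; only the POST method and the `/agents/{agent_id}/rpc/reset` endpoint come from the scenario itself.

```python
# Sketch only: remotely reset an agent through the RPC endpoint named above.
# Base URL, path prefix, token handling and agent_id are illustrative assumptions.
import requests

ORB_URL = "https://orb.live"                 # assumed base URL
TOKEN = "<token obtained via POST /tokens>"
AGENT_ID = "<agent_id>"

response = requests.post(
    f"{ORB_URL}/api/v1/agents/{AGENT_ID}/rpc/reset",   # the "/api/v1" prefix is an assumption
    headers={"Authorization": TOKEN},
    timeout=30,
)
response.raise_for_status()
```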
-____ -### Agent without policies applied: -## Steps: -- Provision an agent and subscribe it to a group -- Restart the agent through a POST request to the `/agents/{agent_id}/rpc/reset` endpoint -- Apply 2 policies to this agent -- Check the logs and agent's view page - -## Expected Result: -- the container logs should contain the message "restarting all backends" within 5 seconds -- the container logs that were output after resetting the agent contain the message "resetting backend" within 5 seconds -- the container logs that were output after resetting the agent contain the message "pktvisor process stopped" within 5 seconds -- the container logs that were output after resetting the agent contain the message "reapplying policies" within 5 seconds -- the container logs that were output after resetting the agent contain the message "all backends were restarted" within 5 seconds -- the container logs that were output after resetting the agent contain the message "completed RPC subscription to group" within 10 seconds -- the container logs that were output after resetting the agent contain the message "policy applied successfully" referred to each applied policy within 10 seconds -- the container logs that were output after resetting the agent contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds -- this agent's heartbeat shows that 2 policies are successfully applied and that its status is running \ No newline at end of file diff --git a/python-test/docs/integration/sink_active_while_scraping_metrics.md b/python-test/docs/integration/sink_active_while_scraping_metrics.md deleted file mode 100644 index a30b97921..000000000 --- a/python-test/docs/integration/sink_active_while_scraping_metrics.md +++ /dev/null @@ -1,35 +0,0 @@ -## Scenario: sink has active status while scraping metrics - -Steps: -- -1. Provision an agent with tags -2. Create a group with same tags as agent -3. Create a sink with valid credentials -4. Create 1 policy -5. Create a dataset linking the group, the sink and the policy -6. Wait 1 minute - -Expected result: -- -- The container logs contain the message "scraped metrics for policy" referred to the applied policy -- Sink status must be "active" (see the sketch below) - -
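Checking that the sink reaches (and keeps) the expected status usually comes down to polling it until the state matches or a timeout expires, as sketched below. The `GET /sinks/{sink_id}` endpoint, the path prefix and the `state` field are assumptions made for illustration; the scenario itself only states the expected status.

```python
# Sketch only: poll a sink until it reports the expected status or a timeout expires.
# Endpoint path, prefix and the "state" field are assumptions; adjust to the real API.
import time
import requests

ORB_URL = "https://orb.live"                 # assumed base URL
TOKEN = "<token obtained via POST /tokens>"

def wait_for_sink_state(sink_id: str, expected: str = "active",
                        timeout: int = 180, interval: int = 10) -> str:
    deadline = time.time() + timeout
    while time.time() < deadline:
        resp = requests.get(f"{ORB_URL}/api/v1/sinks/{sink_id}",
                            headers={"Authorization": TOKEN}, timeout=30)
        resp.raise_for_status()
        state = resp.json().get("state")
        if state == expected:
            return state
        time.sleep(interval)
    raise AssertionError(f"sink {sink_id} did not reach {expected!r} within {timeout}s")
```

The same helper covers the "error" and "idle" variants in the neighbouring scenarios by changing the `expected` argument.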
-------------------------------------------------- - -## Advanced Scenario: sink has active status while scraping metrics - -Steps: -- -1. Provision an agent with tags -2. Create a group with same tags as agent -3. Create a sink -4. Create 1 policy -5. Create a dataset linking the group, the sink and one of the policies -6. Keep sending data for 24 hours - - -Expected result: -- -- The container logs contain the message "scraped metrics for policy" referred to the applied policy -- Check if even after this time, sink status remains active \ No newline at end of file diff --git a/python-test/docs/integration/sink_error_invalid_credentials.md b/python-test/docs/integration/sink_error_invalid_credentials.md deleted file mode 100644 index d2dcb44fd..000000000 --- a/python-test/docs/integration/sink_error_invalid_credentials.md +++ /dev/null @@ -1,15 +0,0 @@ -## Scenario: sink has error status if credentials are invalid - -Steps: -- -1. Provision an agent with tags -2. Create a group with same tags as agent -3. Create a sink with invalid credentials -4. Create 1 policy -5. Create a dataset linking the group, the sink and one of the policies -6. Wait 1 minute - -Expected result: -- -- The container logs contain the message "scraped metrics for policy" referred to the applied policy -- Sink status must be "error" \ No newline at end of file diff --git a/python-test/docs/integration/sink_idle_30_minutes.md b/python-test/docs/integration/sink_idle_30_minutes.md deleted file mode 100644 index d0dcf2b82..000000000 --- a/python-test/docs/integration/sink_idle_30_minutes.md +++ /dev/null @@ -1,16 +0,0 @@ -## Scenario: sink has idle status after 30 minutes without data - -Steps: -- -1. Provision an agent with tags -2. Create a group with same tags as agent -3. Create a sink with invalid credentials -4. Create 1 policy -5. Create a dataset linking the group, the sink and one of the policies -6. Wait 1 minute -7. Remove the dataset to which sink is linked -8. Wait 30 minutes - -Expected result: -- -- Sink status must be "idle" \ No newline at end of file diff --git a/python-test/docs/integration/subscribe_an_agent_to_multiple_groups_created_after_agent_provisioning.md b/python-test/docs/integration/subscribe_an_agent_to_multiple_groups_created_after_agent_provisioning.md deleted file mode 100644 index 2dafb31b1..000000000 --- a/python-test/docs/integration/subscribe_an_agent_to_multiple_groups_created_after_agent_provisioning.md +++ /dev/null @@ -1,12 +0,0 @@ -## Scenario: Subscribe an agent to multiple groups created after agent provisioning - -## Steps: -1. Provision an agent with tags -2. Create a group with at least one tag equal to agent -3. Create another group with at least one tag equal to agent -4. Check agent's logs and agent's heartbeat - - -## Expected Result: -1 - Logs must display the message "completed RPC subscription to group" referred to both groups -2 - Agent's heartbeat must have 2 groups linked diff --git a/python-test/docs/integration/subscribe_an_agent_to_multiple_groups_created_before_agent_provisioning.md b/python-test/docs/integration/subscribe_an_agent_to_multiple_groups_created_before_agent_provisioning.md deleted file mode 100644 index 67ce5639d..000000000 --- a/python-test/docs/integration/subscribe_an_agent_to_multiple_groups_created_before_agent_provisioning.md +++ /dev/null @@ -1,11 +0,0 @@ -## Scenario: Subscribe an agent to multiple groups created before agent provisioning -## Steps: -1. Create a group with one tag -2. Create another group with 2 tags -3. 
Provision an agent with the same tags as the two groups -4. Check agent's logs and agent's heartbeat - - -## Expected Result: -1 - Logs must display the message "completed RPC subscription to group" referred to both groups -2 - Agent's heartbeat must have 2 groups linked \ No newline at end of file diff --git a/python-test/docs/login/check_if_email_and_password_are_required_fields.md b/python-test/docs/login/check_if_email_and_password_are_required_fields.md deleted file mode 100644 index 231a1bd7f..000000000 --- a/python-test/docs/login/check_if_email_and_password_are_required_fields.md +++ /dev/null @@ -1,45 +0,0 @@ -## Scenario: Check if email and password are required fields - -### Sub Scenarios: -## I - Check if email is a required field - -### Steps: - -1 - Request an account registration without email field - -- REST API Method: POST -- endpoint: /users -- body: `{"password":"password", "metadata":{"company":"company","fullName":"name"}}` - -### Expected Result: - -- The request must fail with bad request (error 400) and no account must be registered - -## II - Check if password is a required field - -### Steps: - -1- Request an account registration without password field - -- REST API Method: POST -- endpoint: /users -- body: `{"email":"email", "metadata":{"company":"company","fullName":"name"}}` - -### Expected Result: - -- The request must fail with bad request (error 400) and no account must be registered - - -## III - Check if password and email are required fields - -### Steps: - -1 - Request an account registration using just metadata - -- REST API Method: POST -- endpoint: /users -- body: `{"metadata":{"company":"company","fullName":"name"}}` - -### Expected Result: - -- The request must fail with bad request (error 400) and no account must be registered \ No newline at end of file diff --git a/python-test/docs/login/login_with_invalid_credentials.md b/python-test/docs/login/login_with_invalid_credentials.md deleted file mode 100644 index 6e41c6219..000000000 --- a/python-test/docs/login/login_with_invalid_credentials.md +++ /dev/null @@ -1,45 +0,0 @@ -## Scenario: Login with invalid credentials - -### Sub Scenarios: -## I - Login with invalid email - -## Steps: - -1- Request authentication token using unregistered email and some registered password - -- REST API Method: POST -- endpoint: /tokens -- body: `{"email": "invalid_email", "password": "password"}` - - -## Expected Result: - - The request must fail with forbidden (error 403) and no token must be generated - -## II - Login with invalid password - -## Steps: - -1- Request authentication token using registered email and wrong password - -- REST API Method: POST -- endpoint: /tokens -- body: `{"email": "email", "password": "wrong_password"}` - - -## Expected Result: -- The request must fail with forbidden (error 403) and no token must be generated - -## III - Login with invalid email and invalid password - -## Steps: - -1- Request authentication token using unregistered email and unregistered password - -- REST API Method: POST -- endpoint: /tokens -- body: `{"email": "invalid_email", "password": "invalid_password"}` - - -## Expected Result: - -- The request must fail with forbidden (error 403) and no token must be generated \ No newline at end of file diff --git a/python-test/docs/login/login_with_valid_credentials.md b/python-test/docs/login/login_with_valid_credentials.md deleted file mode 100644 index 7dc352535..000000000 --- a/python-test/docs/login/login_with_valid_credentials.md +++ /dev/null @@ -1,15 +0,0 @@ -## 
Scenario: Login with valid credentials - -## Steps: - -1- Request authentication token using registered email referred password - -- REST API Method: POST -- endpoint: /tokens -- body: `{"email": "email", "password": "password"}` - - - -## Expected Result: - -- Status code must be 200 and a token must be returned on response \ No newline at end of file diff --git a/python-test/docs/login/request_password_with_registered_email_address.md b/python-test/docs/login/request_password_with_registered_email_address.md deleted file mode 100644 index 7b911eee7..000000000 --- a/python-test/docs/login/request_password_with_registered_email_address.md +++ /dev/null @@ -1,14 +0,0 @@ -## Scenario: Request password with registered email address -## Steps: - -1- On Orb auth page (`http://localhost/auth/login`) click in **"Forgot Password?"** - -2- On Orb request password page (`https://orb.live/auth/request-password`) insert a registered email on -"Email address" field - -3- Click on **"REQUEST PASSWORD"** button - -## Expected Result: - -- UI must inform that an email was sent to enable user to change the password -- User must receive an email with valid link to reset account password \ No newline at end of file diff --git a/python-test/docs/login/request_password_with_unregistered_email_address.md b/python-test/docs/login/request_password_with_unregistered_email_address.md deleted file mode 100644 index 4772a6d69..000000000 --- a/python-test/docs/login/request_password_with_unregistered_email_address.md +++ /dev/null @@ -1,14 +0,0 @@ -## Scenario: Request password with unregistered email address -## Steps: - -1- On Orb auth page (`http://localhost/auth/login`) click in **"Forgot Password?"** - -2- On Orb request password page (`https://orb.live/auth/request-password`) insert a unregistered email on -"Email address" field - -3- Click on **"REQUEST PASSWORD"** button - -## Expected Result: - -- UI must inform that an error has occurred -- No email must be sent diff --git a/python-test/docs/login/request_registration_of_a_registered_account_using_registered_password.md b/python-test/docs/login/request_registration_of_a_registered_account_using_registered_password.md deleted file mode 100644 index d5b05e30f..000000000 --- a/python-test/docs/login/request_registration_of_a_registered_account_using_registered_password.md +++ /dev/null @@ -1,16 +0,0 @@ -## Scenario: Request registration of a registered account using registered password - -## Steps: - -1 - Request an account registration using an already registered email and same registered password - -- REST API Method: POST -- endpoint: /users -- body: `{"email":"already_registered_email", "password":"registered_password"}` - -## Expected Result: - -- The request must fail with conflict (error 409), response message must be "email already taken" -- No changes should be made to the previously registered account - (name, company and password must be the ones registered for the first time) - diff --git a/python-test/docs/login/request_registration_of_a_registered_account_using_registered_password_and_company.md b/python-test/docs/login/request_registration_of_a_registered_account_using_registered_password_and_company.md deleted file mode 100644 index 612b5bfcd..000000000 --- a/python-test/docs/login/request_registration_of_a_registered_account_using_registered_password_and_company.md +++ /dev/null @@ -1,15 +0,0 @@ -## Scenario: Request registration of a registered account using registered password and company -## Steps: - -1 - Request an account registration 
using an already registered email, same registered password and company field filled - -- REST API Method: POST -- endpoint: /users -- body: `{"email":"already_registered_email", "password":"registered_password", "metadata":{"company":"company"}}` - -## Expected Result: - -- The request must fail with conflict (error 409), response message must be "email already taken" -- No changes should be made to the previously registered account - (name, company and password must be the ones registered for the first time) - diff --git a/python-test/docs/login/request_registration_of_a_registered_account_using_registered_password_and_username.md b/python-test/docs/login/request_registration_of_a_registered_account_using_registered_password_and_username.md deleted file mode 100644 index b1984cb9b..000000000 --- a/python-test/docs/login/request_registration_of_a_registered_account_using_registered_password_and_username.md +++ /dev/null @@ -1,16 +0,0 @@ -## Scenario: Request registration of a registered account using registered password and username -## Steps: - - -1 - Request an account registration using an already registered email, same registered password and fullname field filled - -- REST API Method: POST -- endpoint: /users -- body: `{"email":"already_registered_email", "password":"registered_password", "metadata":{"fullName":"name"}}` - -## Expected Result: - -- The request must fail with conflict (error 409), response message must be "email already taken" -- No changes should be made to the previously registered account - (name, company and password must be the ones registered for the first time) - diff --git a/python-test/docs/login/request_registration_of_a_registered_account_using_registered_password_username_and_company.md b/python-test/docs/login/request_registration_of_a_registered_account_using_registered_password_username_and_company.md deleted file mode 100644 index 32a3e5696..000000000 --- a/python-test/docs/login/request_registration_of_a_registered_account_using_registered_password_username_and_company.md +++ /dev/null @@ -1,15 +0,0 @@ -## Scenario: Request registration of a registered account using registered password, username and company -## Steps: - -1 - Request an account registration using an already registered email, registered password and fullname and company field filled - - -- REST API Method: POST -- endpoint: /users -- body: `{"email":"already_registered_email", "password":"registered_password", "metadata":{"company":"company","fullName":"name"}}` - -## Expected Result: - -- The request must fail with conflict (error 409), response message must be "email already taken" -- No changes should be made to the previously registered account -(name, company and password must be the ones registered for the first time) diff --git a/python-test/docs/login/request_registration_of_a_registered_account_using_unregistered_password.md b/python-test/docs/login/request_registration_of_a_registered_account_using_unregistered_password.md deleted file mode 100644 index f91bce6f8..000000000 --- a/python-test/docs/login/request_registration_of_a_registered_account_using_unregistered_password.md +++ /dev/null @@ -1,14 +0,0 @@ -## Scenario: Request registration of a registered account using unregistered password -## Steps: - -1 - Request an account registration using an already registered email and password different from registered - -- REST API Method: POST -- endpoint: /users -- body: `{"email":"already_registered_email", "password":"unregistered_password"}` - -## Expected Result: - -- The 
request must fail with conflict (error 409), response message must be "email already taken" -- No changes should be made to the previously registered account - (name, company and password must be the ones registered for the first time and the new password should not give access to the account) diff --git a/python-test/docs/login/request_registration_of_a_registered_account_using_unregistered_password_and_company.md b/python-test/docs/login/request_registration_of_a_registered_account_using_unregistered_password_and_company.md deleted file mode 100644 index ba10f773c..000000000 --- a/python-test/docs/login/request_registration_of_a_registered_account_using_unregistered_password_and_company.md +++ /dev/null @@ -1,14 +0,0 @@ -## Scenario: Request registration of a registered account using unregistered password and company -## Steps: - -1 - Request an account registration using an already registered email, password different from registered and company field filled - -- REST API Method: POST -- endpoint: /users -- body: `{"email":"already_registered_email", "password":"unregistered_password", "metadata":{"company":"company"}}` - -## Expected Result: - -- The request must fail with conflict (error 409), response message must be "email already taken" -- No changes should be made to the previously registered account - (name, company and password must be the ones registered for the first time and the new password should not give access to the account) \ No newline at end of file diff --git a/python-test/docs/login/request_registration_of_a_registered_account_using_unregistered_password_and_username.md b/python-test/docs/login/request_registration_of_a_registered_account_using_unregistered_password_and_username.md deleted file mode 100644 index 09c528c29..000000000 --- a/python-test/docs/login/request_registration_of_a_registered_account_using_unregistered_password_and_username.md +++ /dev/null @@ -1,15 +0,0 @@ -## Scenario: Request registration of a registered account using unregistered password and username -## Steps: - -1 - Request an account registration using an already registered email, password different from registered and fullname field filled - -- REST API Method: POST -- endpoint: /users -- body: `{"email":"already_registered_email", "password":"unregistered_password", "metadata":{"fullName":"name"}}` - -## Expected Result: - -- The request must fail with conflict (error 409), response message must be "email already taken" -- No changes should be made to the previously registered account - (name, company and password must be the ones registered for the first time and the new password should not give access to the account - ) diff --git a/python-test/docs/login/request_registration_of_a_registered_account_using_unregistered_password_username_and_company.md b/python-test/docs/login/request_registration_of_a_registered_account_using_unregistered_password_username_and_company.md deleted file mode 100644 index 460c6cf19..000000000 --- a/python-test/docs/login/request_registration_of_a_registered_account_using_unregistered_password_username_and_company.md +++ /dev/null @@ -1,15 +0,0 @@ -## Scenario: Request registration of a registered account using unregistered password, username and company -## Steps: - -1 - Request an account registration using an already registered email, unregistered password and fullname and company field filled - - -- REST API Method: POST -- endpoint: /users -- body: `{"email":"already_registered_email", "password":"unregistered_password", 
"metadata":{"company":"company","fullName":"name"}}` - -## Expected Result: - -- The request must fail with conflict (error 409), response message must be "email already taken" -- No changes should be made to the previously registered account - (name, company and password must be the ones registered for the first time and the new password should not give access to the account) diff --git a/python-test/docs/login/request_registration_of_an_unregistered_account_with_invalid_password_and_invalid_email.md b/python-test/docs/login/request_registration_of_an_unregistered_account_with_invalid_password_and_invalid_email.md deleted file mode 100644 index c6a0147c4..000000000 --- a/python-test/docs/login/request_registration_of_an_unregistered_account_with_invalid_password_and_invalid_email.md +++ /dev/null @@ -1,14 +0,0 @@ -## Scenario: Request registration of an unregistered account with invalid password and invalid email -## Steps: - -1 - Request an account registration using an email without `@server` and password with length less than 8 - - -- REST API Method: POST -- endpoint: /users -- body: `{"email":"invalid_email", "password":"invalid_password"}` - -## Expected Result: - -- The request must fail with bad request (error 400) -- No account must be registered \ No newline at end of file diff --git a/python-test/docs/login/request_registration_of_an_unregistered_account_with_invalid_password_and_valid_email.md b/python-test/docs/login/request_registration_of_an_unregistered_account_with_invalid_password_and_valid_email.md deleted file mode 100644 index a56456c3c..000000000 --- a/python-test/docs/login/request_registration_of_an_unregistered_account_with_invalid_password_and_valid_email.md +++ /dev/null @@ -1,14 +0,0 @@ -## Scenario: Request registration of an unregistered account with invalid password and valid email -## Steps: - -1 - Request an account registration using a valid email and password with length less than 8 - - -- REST API Method: POST -- endpoint: /users -- body: `{"email":"email", "password":"invalid_password"}` - -## Expected Result: - -- The request must fail with bad request (error 400) and response message must be "password does not meet the requirements" -- No account must be registered \ No newline at end of file diff --git a/python-test/docs/login/request_registration_of_an_unregistered_account_with_valid_password_and_invalid_email.md b/python-test/docs/login/request_registration_of_an_unregistered_account_with_valid_password_and_invalid_email.md deleted file mode 100644 index 042fc12ad..000000000 --- a/python-test/docs/login/request_registration_of_an_unregistered_account_with_valid_password_and_invalid_email.md +++ /dev/null @@ -1,14 +0,0 @@ -## Scenario: Request registration of an unregistered account with valid password and invalid email -## Steps: - -1 - Request an account registration using an email without `@server` and password with length greater than or equal to 8 - - -- REST API Method: POST -- endpoint: /users -- body: `{"email":"invalid_email", "password":"password"}` - -## Expected Result: - -- The request must fail with bad request (error 400) -- No account must be registered \ No newline at end of file diff --git a/python-test/docs/login/request_registration_of_an_unregistered_account_with_valid_password_and_valid_email.md b/python-test/docs/login/request_registration_of_an_unregistered_account_with_valid_password_and_valid_email.md deleted file mode 100644 index 5532dc71e..000000000 --- 
a/python-test/docs/login/request_registration_of_an_unregistered_account_with_valid_password_and_valid_email.md +++ /dev/null @@ -1,15 +0,0 @@ -## Scenario: Request registration of an unregistered account with valid password and valid email -## Steps: - -1 - Request an account registration using a valid email and valid password - - -- REST API Method: POST -- endpoint: /users -- body: `{"email":"email", "password":"password"}` - -## Expected Result: - -- The request must be processed successfully (status code 201) -- The new account must be registered -- User must be able to access orb using email and password registered \ No newline at end of file diff --git a/python-test/docs/policies/check_if_is_possible_cancel_operations_with_no_change.md b/python-test/docs/policies/check_if_is_possible_cancel_operations_with_no_change.md deleted file mode 100644 index dcbdec035..000000000 --- a/python-test/docs/policies/check_if_is_possible_cancel_operations_with_no_change.md +++ /dev/null @@ -1,22 +0,0 @@ -## Scenario: Check if is possible cancel operations with no change -## Steps: -1 - Create a policy - -- REST API Method: POST -- endpoint: /policies/agent/ -- header: {authorization:token} - -2 - On policies' page (`orb.live/pages/datasets/policies`) click on edit button - -3 - Change policy's name - -4 - Change policy's description and click "next" - -5 - Change policy's tap configuration options and filter and click "next" - -6 - Change policy's handler - -7 - Click "back" until return to policies' page - -## Expected Result: -- No changes must have been applied to the policy diff --git a/python-test/docs/policies/check_if_total_policies_on_policies'_page_is_correct.md b/python-test/docs/policies/check_if_total_policies_on_policies'_page_is_correct.md deleted file mode 100644 index d4246fa79..000000000 --- a/python-test/docs/policies/check_if_total_policies_on_policies'_page_is_correct.md +++ /dev/null @@ -1,20 +0,0 @@ -## Scenario: Check if total policies on policies' page is correct -## Steps: -1 - Create multiple policies - -- REST API Method: POST -- endpoint: /policies/agent/ -- header: {authorization:token} - -2 - Get all existing policies - -- REST API Method: GET -- endpoint: /policies/agent/ - -3 - On policies' page (`orb.live/pages/datasets/policies`) check the total number of policies at the end of the policies table - -4 - Count the number of existing policies - -## Expected Result: -- Total policies on API response, policies page and the real number must be the same - diff --git a/python-test/docs/policies/check_policies_details.md b/python-test/docs/policies/check_policies_details.md deleted file mode 100644 index 3d64aa53b..000000000 --- a/python-test/docs/policies/check_policies_details.md +++ /dev/null @@ -1,15 +0,0 @@ -## Scenario: Check policies details -## Steps: -1 - Create a policy - -- REST API Method: POST -- endpoint: /policies/agent/ -- header: {authorization:token} - -2 - Get a policy - -- REST API Method: GET -- endpoint: /policies/agent/ - -## Expected Result: -- Status code must be 200 and policy name, description, backend, input details and handler must be returned on response diff --git a/python-test/docs/policies/create_3_duplicated_dns_policy_without_insert_new_name_and_1_with_new_name.md b/python-test/docs/policies/create_3_duplicated_dns_policy_without_insert_new_name_and_1_with_new_name.md deleted file mode 100644 index bb4c9337d..000000000 --- a/python-test/docs/policies/create_3_duplicated_dns_policy_without_insert_new_name_and_1_with_new_name.md +++ 
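For the happy path just above (valid e-mail, valid password), the same call is simply expected to return 201 and leave the account usable for login. A possible sketch, generating the address on the fly so the test never collides with an existing account; the base URL and e-mail domain are placeholders:

```python
# Sketch only: base URL and e-mail domain are placeholders.
import uuid
import requests

ORB_URL = "https://orb.live"   # assumed base URL

email = f"qa-{uuid.uuid4().hex[:8]}@example.com"   # guaranteed-unregistered address
resp = requests.post(f"{ORB_URL}/users",
                     json={"email": email, "password": "12345678"},
                     timeout=30)
assert resp.status_code == 201   # account created; these credentials should now log in
```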
/dev/null @@ -1,25 +0,0 @@ -## Scenario: Create 3 duplicated dns policy without insert new name and 1 with new name - -## Steps: -1 - Create a policy - -- REST API Method: POST -- endpoint: /policies/agent/ -- header: {authorization:token} - -2 - Duplicate this policy 3 times - -- REST API Method: POST -- endpoint: /policies/agent/{policy_id}/duplicate -- header: {authorization:token} - -3 - Duplicate this policy 1 more time inserting new name - -- REST API Method: POST -- endpoint: /policies/agent/{policy_id}/duplicate -- header: {authorization:token} -- body: {"name": "new name"} - -## Expected Result: - -- All requests must have status code 201 (created) and the policies must be created diff --git a/python-test/docs/policies/create_4_duplicated_policy_with_new_name.md b/python-test/docs/policies/create_4_duplicated_policy_with_new_name.md deleted file mode 100644 index a3a9f607e..000000000 --- a/python-test/docs/policies/create_4_duplicated_policy_with_new_name.md +++ /dev/null @@ -1,20 +0,0 @@ -## Scenario: Create 4 duplicated policy with new name - -## Steps: -1 - Create a policy - -- REST API Method: POST -- endpoint: /policies/agent/ -- header: {authorization:token} - -2 - Duplicate this policy 4 times inserting new name - -- REST API Method: POST -- endpoint: /policies/agent/{policy_id}/duplicate -- header: {authorization:token} -- body: {"name": "new name"} - - -## Expected Result: - -- All requests must have status code 201 (created) and the policies must be created diff --git a/python-test/docs/policies/create_duplicated_dhcp_policy_without_insert_new_name.md b/python-test/docs/policies/create_duplicated_dhcp_policy_without_insert_new_name.md deleted file mode 100644 index 4d77459a6..000000000 --- a/python-test/docs/policies/create_duplicated_dhcp_policy_without_insert_new_name.md +++ /dev/null @@ -1,20 +0,0 @@ -## Scenario: Create duplicated dhcp policy without insert new name - -## Steps: -1 - Create a dhcp policy - -- REST API Method: POST -- endpoint: /policies/agent/ -- header: {authorization:token} - -2 - Duplicate this policy - -- REST API Method: POST -- endpoint: /policies/agent/{policy_id}/duplicate -- header: {authorization:token} - - -## Expected Result: - -- 3 request must have status code 201 (created) and the policy must be created -- From the 4th order, it must fail by conflict diff --git a/python-test/docs/policies/create_duplicated_dns_policy_without_insert_new_name.md b/python-test/docs/policies/create_duplicated_dns_policy_without_insert_new_name.md deleted file mode 100644 index fa595447b..000000000 --- a/python-test/docs/policies/create_duplicated_dns_policy_without_insert_new_name.md +++ /dev/null @@ -1,20 +0,0 @@ -## Scenario: Create duplicated dns policy without insert new name - -## Steps: -1 - Create a dns policy - -- REST API Method: POST -- endpoint: /policies/agent/ -- header: {authorization:token} - -2 - Duplicate this policy - -- REST API Method: POST -- endpoint: /policies/agent/{policy_id}/duplicate -- header: {authorization:token} - - -## Expected Result: - -- 3 request must have status code 201 (created) and the policy must be created -- From the 4th order, it must fail by conflict diff --git a/python-test/docs/policies/create_duplicated_net_policy_without_insert_new_name.md b/python-test/docs/policies/create_duplicated_net_policy_without_insert_new_name.md deleted file mode 100644 index cca34e822..000000000 --- a/python-test/docs/policies/create_duplicated_net_policy_without_insert_new_name.md +++ /dev/null @@ -1,20 +0,0 @@ -## Scenario: 
Create duplicated net policy without insert new name - -## Steps: -1 - Create a net policy - -- REST API Method: POST -- endpoint: /policies/agent/ -- header: {authorization:token} - -2 - Duplicate this policy - -- REST API Method: POST -- endpoint: /policies/agent/{policy_id}/duplicate -- header: {authorization:token} - - -## Expected Result: - -- 3 request must have status code 201 (created) and the policy must be created -- From the 4th order, it must fail by conflict diff --git a/python-test/docs/policies/create_policy_with_description.md b/python-test/docs/policies/create_policy_with_description.md deleted file mode 100644 index ec6f42b4a..000000000 --- a/python-test/docs/policies/create_policy_with_description.md +++ /dev/null @@ -1,12 +0,0 @@ -## Scenario: Create policy with description -## Steps: - -1 - Create a policy with description - -- REST API Method: POST -- endpoint: /policies/agent/ -- header: {authorization:token} - - -## Expected Result: -- Request must have status code 201 (created) and the policy must be created diff --git a/python-test/docs/policies/create_policy_with_dhcp_handler.md b/python-test/docs/policies/create_policy_with_dhcp_handler.md deleted file mode 100644 index 2e46898f9..000000000 --- a/python-test/docs/policies/create_policy_with_dhcp_handler.md +++ /dev/null @@ -1,10 +0,0 @@ -## Scenario: Create policy with dhcp handler -## 1 - Create a policy with dhcp handler, description, host specification, bpf filter and pcap source - -- REST API Method: POST -- endpoint: /policies/agent/ -- header: {authorization:token} - - -### Expected Result: -- Request must have status code 201 (created) and the policy must be created \ No newline at end of file diff --git a/python-test/docs/policies/create_policy_with_dns_handler.md b/python-test/docs/policies/create_policy_with_dns_handler.md deleted file mode 100644 index d6e029d1e..000000000 --- a/python-test/docs/policies/create_policy_with_dns_handler.md +++ /dev/null @@ -1,54 +0,0 @@ -## Scenario: Create policy with dns handler - -## 1 - Create a policy with dns handler, description, host specification, bpf filter, pcap source, only qname suffix and only rcode - -- REST API Method: POST -- endpoint: /policies/agent/ -- header: {authorization:token} - - -### Expected Result: -- Request must have status code 201 (created) and the policy must be created - - -## 2 - Create a policy with dns handler, host specification, bpf filter, pcap source, only qname suffix and only rcode - -- REST API Method: POST -- endpoint: /policies/agent/ -- header: {authorization:token} - - -### Expected Result: -- Request must have status code 201 (created) and the policy must be created - - -## 3 - Scenario: Create a policy with dns handler, bpf filter, pcap source, only qname suffix and only rcode - -- REST API Method: POST -- endpoint: /policies/agent/ -- header: {authorization:token} - - -### Expected Result: -- Request must have status code 201 (created) and the policy must be created - -## 4 - Scenario: Create a policy with dns handler, pcap source, only qname suffix and only rcode - -- REST API Method: POST -- endpoint: /policies/agent/ -- header: {authorization:token} - - -### Expected Result: -- Request must have status code 201 (created) and the policy must be created - - -## 5 - Scenario: Create a policy with dns handler, only qname suffix - -- REST API Method: POST -- endpoint: /policies/agent/ -- header: {authorization:token} - - -### Expected Result: -- Request must have status code 201 (created) and the policy must be created \ 
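The duplication scenarios above pin down a concrete rule: a policy can be duplicated without a new name at most three times (the service auto-names the copies), the fourth attempt conflicts, and passing a fresh name always works. A sketch of that sequence against the documented `POST /policies/agent/{policy_id}/duplicate` endpoint; the base URL, token and policy id are placeholders for fixtures the real suite would create beforehand:

```python
# Sketch only: ORB_URL, the token and POLICY_ID are placeholders.
import requests

ORB_URL = "https://orb.live"                         # assumed base URL
HEADERS = {"Authorization": "<orb-session-token>"}   # docs only say {authorization:token}
POLICY_ID = "<existing-policy-id>"

def duplicate(policy_id, new_name=None):
    body = {"name": new_name} if new_name else None
    return requests.post(f"{ORB_URL}/policies/agent/{policy_id}/duplicate",
                         json=body, headers=HEADERS, timeout=30)

for _ in range(3):                                   # first three copies are auto-named
    assert duplicate(POLICY_ID).status_code == 201
assert duplicate(POLICY_ID).status_code == 409                   # fourth copy: conflict
assert duplicate(POLICY_ID, "renamed-copy").status_code == 201   # explicit name still works
```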
No newline at end of file diff --git a/python-test/docs/policies/create_policy_with_duplicate_name.md b/python-test/docs/policies/create_policy_with_duplicate_name.md deleted file mode 100644 index e1599b780..000000000 --- a/python-test/docs/policies/create_policy_with_duplicate_name.md +++ /dev/null @@ -1,13 +0,0 @@ -## Scenario: Create policy with duplicate name -## Steps: -1 - Create a policy - -- REST API Method: POST -- endpoint: /policies/agent/ -- header: {authorization:token} - -2 - Create another policy using the same policy name - -## Expected Result: -- First request must have status code 201 (created) and one policy must be created on orb -- Second request must fail with status code 409 (conflict) and no other policy must be created (make sure that first policy has not been modified) diff --git a/python-test/docs/policies/create_policy_with_invalid_name_(regex).md b/python-test/docs/policies/create_policy_with_invalid_name_(regex).md deleted file mode 100644 index 2541247ea..000000000 --- a/python-test/docs/policies/create_policy_with_invalid_name_(regex).md +++ /dev/null @@ -1,15 +0,0 @@ -## Scenario: Create policy with invalid name (regex) -## Steps: -1 - Create an policy using an invalid regex to policy name - -- REST API Method: POST -- endpoint: /policies/agent/ -- header: {authorization:token} -- example of invalid regex: - -* name starting with non-alphabetic characters -* name with just 1 letter -* space-separated composite name - -## Expected Result: -- Request must fail with status code 400 (bad request) and no policy must be created diff --git a/python-test/docs/policies/create_policy_with_multiple_handlers.md b/python-test/docs/policies/create_policy_with_multiple_handlers.md deleted file mode 100644 index 75380cc59..000000000 --- a/python-test/docs/policies/create_policy_with_multiple_handlers.md +++ /dev/null @@ -1,12 +0,0 @@ -## Scenario: Create policy with multiple handlers -## Steps: - -1 - Create a policy with dns, net and dhcp handler - -- REST API Method: POST -- endpoint: /policies/agent/ -- header: {authorization:token} - - -## Expected Result: -- Request must have status code 201 (created) and the policy must be created \ No newline at end of file diff --git a/python-test/docs/policies/create_policy_with_net_handler.md b/python-test/docs/policies/create_policy_with_net_handler.md deleted file mode 100644 index 042d27dac..000000000 --- a/python-test/docs/policies/create_policy_with_net_handler.md +++ /dev/null @@ -1,12 +0,0 @@ -## Scenario: Create policy with net handler - -## 1 - Create a policy with net handler, description, host specification, bpf filter and pcap source - -- REST API Method: POST -- endpoint: /policies/agent/ -- header: {authorization:token} - - -### Expected Result: -- Request must have status code 201 (created) and the policy must be created - diff --git a/python-test/docs/policies/create_policy_with_no_agent_provisioned.md b/python-test/docs/policies/create_policy_with_no_agent_provisioned.md deleted file mode 100644 index aa1c9e7e1..000000000 --- a/python-test/docs/policies/create_policy_with_no_agent_provisioned.md +++ /dev/null @@ -1,12 +0,0 @@ -## Scenario: Create policy with no agent provisioned -## Steps: -- Without provision any agent go to policies page (`https://orb.live/pages/datasets/policies`) - -1 - Click in "+ NEW POLICY" - -2- Insert a policy label -3 - Click on "NEXT' button - -## Expected Result: - -- Alert message should be displayed informing that there is no agents available \ No newline at end of file diff --git 
a/python-test/docs/policies/create_policy_without_description.md b/python-test/docs/policies/create_policy_without_description.md deleted file mode 100644 index 50caf3b0f..000000000 --- a/python-test/docs/policies/create_policy_without_description.md +++ /dev/null @@ -1,12 +0,0 @@ -## Scenario: Create policy without description -## Steps: - -1 - Create a policy without description - -- REST API Method: POST -- endpoint: /policies/agent/ -- header: {authorization:token} - - -## Expected Result: -- Request must have status code 201 (created) and the policy must be created \ No newline at end of file diff --git a/python-test/docs/policies/edit_a_policy_through_the_details_modal.md b/python-test/docs/policies/edit_a_policy_through_the_details_modal.md deleted file mode 100644 index 9e4149248..000000000 --- a/python-test/docs/policies/edit_a_policy_through_the_details_modal.md +++ /dev/null @@ -1,13 +0,0 @@ -## Scenario: Edit a policy through the details modal -## Steps: -1 - Create a policy - -- REST API Method: POST -- endpoint: /policies/agent/ -- header: {authorization:token} - -2 - On policies' page (`orb.live/pages/datasets/policies`) click on details button -3 - Click on "edit" button - -## Expected Result: -- User should be redirected to this policy's edit page and should be able to make changes \ No newline at end of file diff --git a/python-test/docs/policies/edit_policy_bpf_filter_expression.md b/python-test/docs/policies/edit_policy_bpf_filter_expression.md deleted file mode 100644 index 5d805a0e6..000000000 --- a/python-test/docs/policies/edit_policy_bpf_filter_expression.md +++ /dev/null @@ -1,18 +0,0 @@ -## Scenario: Edit policy bpf_filter_expression - -## Steps: -1 - Create a policy - -- REST API Method: POST -- endpoint: /policies/agent/ -- header: {authorization:token} - -2- Edit this policy bpf_filter_expression - -- REST API Method: PUT -- endpoint: /policies/agent/policy_id -- header: {authorization:token} - - -## Expected Result: -- Request must have status code 200 (ok) and changes must be applied \ No newline at end of file diff --git a/python-test/docs/policies/edit_policy_description.md b/python-test/docs/policies/edit_policy_description.md deleted file mode 100644 index b898eb081..000000000 --- a/python-test/docs/policies/edit_policy_description.md +++ /dev/null @@ -1,17 +0,0 @@ -## Scenario: Edit policy description -## Steps: -1 - Create a policy - -- REST API Method: POST -- endpoint: /policies/agent/ -- header: {authorization:token} - -2- Edit this policy description - -- REST API Method: PUT -- endpoint: /policies/agent/policy_id -- header: {authorization:token} - - -## Expected Result: -- Request must have status code 200 (ok) and changes must be applied diff --git a/python-test/docs/policies/edit_policy_handler.md b/python-test/docs/policies/edit_policy_handler.md deleted file mode 100644 index 1db7c73c9..000000000 --- a/python-test/docs/policies/edit_policy_handler.md +++ /dev/null @@ -1,18 +0,0 @@ -## Scenario: Edit policy handler -## Step: - -1 - Create a policy - -- REST API Method: POST -- endpoint: /policies/agent/ -- header: {authorization:token} - -2- Edit this policy handler - -- REST API Method: PUT -- endpoint: /policies/agent/policy_id -- header: {authorization:token} - - -## Expected Result: -- Request must have status code 200 (ok) and changes must be applied diff --git a/python-test/docs/policies/edit_policy_host_specification.md b/python-test/docs/policies/edit_policy_host_specification.md deleted file mode 100644 index bd091f10d..000000000 --- 
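All of the `edit_policy_*` scenarios in this block follow the same pattern: create a policy, `PUT` it back with one field changed, expect 200 and the change persisted. A sketch of the description case; whether the endpoint accepts a partial body or needs the full policy document is an assumption, so the sketch re-sends the document retrieved beforehand:

```python
# Sketch only: base URL, token and policy id are placeholders, and the
# GET-then-PUT-full-document flow is an assumption about the API contract.
import requests

ORB_URL = "https://orb.live"                         # assumed base URL
HEADERS = {"Authorization": "<orb-session-token>"}
POLICY_ID = "<existing-policy-id>"

policy = requests.get(f"{ORB_URL}/policies/agent/{POLICY_ID}",
                      headers=HEADERS, timeout=30).json()
policy["description"] = "description edited by the test"

resp = requests.put(f"{ORB_URL}/policies/agent/{POLICY_ID}",
                    json=policy, headers=HEADERS, timeout=30)
assert resp.status_code == 200
assert resp.json().get("description") == "description edited by the test"
```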
a/python-test/docs/policies/edit_policy_host_specification.md +++ /dev/null @@ -1,18 +0,0 @@ -## Scenario: Edit policy host_specification - -## Steps: -1 - Create a policy - -- REST API Method: POST -- endpoint: /policies/agent/ -- header: {authorization:token} - -2- Edit this policy host_specification - -- REST API Method: PUT -- endpoint: /policies/agent/policy_id -- header: {authorization:token} - - -## Expected Result: -- Request must have status code 200 (ok) and changes must be applied \ No newline at end of file diff --git a/python-test/docs/policies/edit_policy_name.md b/python-test/docs/policies/edit_policy_name.md deleted file mode 100644 index a4a575b2b..000000000 --- a/python-test/docs/policies/edit_policy_name.md +++ /dev/null @@ -1,17 +0,0 @@ -## Scenario: Edit policy name -## Steps: -1 - Create a policy - -- REST API Method: POST -- endpoint: /policies/agent/ -- header: {authorization:token} - -2- Edit this policy name - -- REST API Method: PUT -- endpoint: /policies/agent/policy_id -- header: {authorization:token} - - -## Expected Result: -- Request must have status code 200 (ok) and changes must be applied \ No newline at end of file diff --git a/python-test/docs/policies/edit_policy_only_qname_suffix.md b/python-test/docs/policies/edit_policy_only_qname_suffix.md deleted file mode 100644 index 425b2ec22..000000000 --- a/python-test/docs/policies/edit_policy_only_qname_suffix.md +++ /dev/null @@ -1,17 +0,0 @@ -## Scenario: Edit policy only_qname_suffix -## Steps: -1 - Create a policy - -- REST API Method: POST -- endpoint: /policies/agent/ -- header: {authorization:token} - -2- Edit this policy only_qname_suffix - -- REST API Method: PUT -- endpoint: /policies/agent/policy_id -- header: {authorization:token} - - -## Expected Result: -- Request must have status code 200 (ok) and changes must be applied \ No newline at end of file diff --git a/python-test/docs/policies/edit_policy_only_rcode.md b/python-test/docs/policies/edit_policy_only_rcode.md deleted file mode 100644 index 9069d88d4..000000000 --- a/python-test/docs/policies/edit_policy_only_rcode.md +++ /dev/null @@ -1,17 +0,0 @@ -## Scenario: Edit policy only_rcode -## Steps: -1 - Create a policy - -- REST API Method: POST -- endpoint: /policies/agent/ -- header: {authorization:token} - -2- Edit this policy only_rcode - -- REST API Method: PUT -- endpoint: /policies/agent/policy_id -- header: {authorization:token} - - -## Expected Result: -- Request must have status code 200 (ok) and changes must be applied \ No newline at end of file diff --git a/python-test/docs/policies/edit_policy_pcap_source.md b/python-test/docs/policies/edit_policy_pcap_source.md deleted file mode 100644 index 274fc720e..000000000 --- a/python-test/docs/policies/edit_policy_pcap_source.md +++ /dev/null @@ -1,17 +0,0 @@ -## Scenario: Edit policy pcap_source -## Steps: -1 - Create a policy - -- REST API Method: POST -- endpoint: /policies/agent/ -- header: {authorization:token} - -2- Edit this policy pcap_source - -- REST API Method: PUT -- endpoint: /policies/agent/policy_id -- header: {authorization:token} - - -## Expected Result: -- Request must have status code 200 (ok) and changes must be applied \ No newline at end of file diff --git a/python-test/docs/policies/remove_policy_using_correct_name.md b/python-test/docs/policies/remove_policy_using_correct_name.md deleted file mode 100644 index b37d15162..000000000 --- a/python-test/docs/policies/remove_policy_using_correct_name.md +++ /dev/null @@ -1,16 +0,0 @@ -## Scenario: Remove policy using 
correct name -## Steps: -1 - Create a policy - -- REST API Method: POST -- endpoint: /policies/agent/ -- header: {authorization:token} - -2 - On policies' page (`orb.live/pages/datasets/policies`) click on remove button -3 - Insert the name of the policy correctly on delete modal -4 - Confirm the operation by clicking on "I UNDERSTAND, DELETE THIS POLICY" button - -## Expected Result: -- Policy must be deleted - - \ No newline at end of file diff --git a/python-test/docs/policies/remove_policy_using_incorrect_name.md b/python-test/docs/policies/remove_policy_using_incorrect_name.md deleted file mode 100644 index 1a4337140..000000000 --- a/python-test/docs/policies/remove_policy_using_incorrect_name.md +++ /dev/null @@ -1,14 +0,0 @@ -## Scenario: Remove policy using incorrect name -## Steps: -1 - Create a policy - -- REST API Method: POST -- endpoint: /policies/agent/ -- header: {authorization:token} - -2 - On policies' page (`orb.live/pages/datasets/policies`) click on remove button -3 - Insert the name of the policy incorrectly on delete modal - -## Expected Result: -- "I UNDERSTAND, DELETE THIS POLICY" button must not be enabled -- After user close the deletion modal, policy must not be deleted diff --git a/python-test/docs/policies/test_policy_filters.md b/python-test/docs/policies/test_policy_filters.md deleted file mode 100644 index abc661740..000000000 --- a/python-test/docs/policies/test_policy_filters.md +++ /dev/null @@ -1,19 +0,0 @@ -## Scenario: Test policy filters -## Steps: -1 - Create multiple policies - -- REST API Method: POST -- endpoint: /policies/agent/ -- header: {authorization:token} - -2 - On policies' page (`orb.live/pages/datasets/policies`) use the filter: - - * Name - * Description - * Version - * Search by - - -## Expected Result: - -- All filters must be working properly diff --git a/python-test/docs/sanity.md b/python-test/docs/sanity.md deleted file mode 100644 index b1f2a5c83..000000000 --- a/python-test/docs/sanity.md +++ /dev/null @@ -1,165 +0,0 @@ -## Sanity tests - - -## Login - -- [Request registration of a registered account using registered password username and company](login/request_registration_of_a_registered_account_using_registered_password_username_and_company.md) -- [Request registration of a registered account using registered password and username](login/request_registration_of_a_registered_account_using_registered_password_and_username.md) -- [Request registration of a registered account using registered password and company](login/request_registration_of_a_registered_account_using_registered_password_and_company.md) -- [Request registration of a registered account using registered password](login/request_registration_of_a_registered_account_using_registered_password.md) -- [Request registration of a registered account using unregistered password username and company](login/request_registration_of_a_registered_account_using_unregistered_password_username_and_company.md) -- [Request registration of a registered account using unregistered password and username](login/request_registration_of_a_registered_account_using_unregistered_password_and_username.md) -- [Request registration of a registered account using unregistered password and company](login/request_registration_of_a_registered_account_using_unregistered_password_and_company.md) -- [Request registration of a registered account using unregistered password](login/request_registration_of_a_registered_account_using_unregistered_password.md) -- [Request registration of an unregistered 
account with valid password and invalid email](login/request_registration_of_an_unregistered_account_with_valid_password_and_invalid_email.md) -- [Request registration of an unregistered account with valid password and valid email](login/request_registration_of_an_unregistered_account_with_valid_password_and_valid_email.md) -- [Request registration of an unregistered account with invalid password and valid email](login/request_registration_of_an_unregistered_account_with_invalid_password_and_valid_email.md) -- [Request registration of an unregistered account with invalid password and invalid email](login/request_registration_of_an_unregistered_account_with_invalid_password_and_invalid_email.md) -- [Check if email and password are required fields](login/check_if_email_and_password_are_required_fields.md) -- [Login with valid credentials](login/login_with_valid_credentials.md) -- [Login with invalid credentials](login/login_with_invalid_credentials.md) -- [Request password with registered email address](login/request_password_with_registered_email_address.md) -- [Request password with unregistered email address](login/request_password_with_unregistered_email_address.md) - - -## Agents - -- [Create agent without tags](agents/create_agent_without_tags.md) -- [Create agent with one tag](agents/create_agent_with_one_tag.md) -- [Create agent with multiple tags](agents/create_agent_with_multiple_tags.md) -- [Create agent with invalid name (regex)](agents/create_agent_with_invalid_name_(regex).md) -- [Create agent with duplicate name](agents/create_agent_with_duplicate_name.md) -- [Check agent details](agents/check_agent_details.md) -- [Edit an agent through the details modal](agents/edit_an_agent_through_the_details_modal.md) -- [Edit agent name](agents/edit_agent_name.md) -- [Edit agent tag](agents/edit_agent_tag.md) -- [Save agent without tag](agents/save_agent_without_tag.md) -- [Insert tags in agents created without tags](agents/insert_tags_in_agents_created_without_tags.md) -- [Remove agent using correct name](agents/remove_agent_using_correct_name.md) -- [Remove agent using incorrect name](agents/remove_agent_using_incorrect_name.md) -- [Run two orb agents on the same port](agents/run_two_orb_agents_on_the_same_port.md) -- [Run two orb agents on different ports](agents/run_two_orb_agents_on_different_ports.md) -- [Edit agent name and tag](agents/edit_agent_name_and_tags.md) - - -## Agent Groups - -- [Create agent group with invalid name (regex)](agent_groups/create_agent_group_with_invalid_name_(regex).md) -- [Create agent group with duplicate name](agent_groups/create_agent_group_with_duplicate_name.md) -- [Create agent group with description](agent_groups/create_agent_group_with_description.md) -- [Create agent group without description](agent_groups/create_agent_group_without_description.md) -- [Create agent group without tag](agent_groups/create_agent_group_without_tag.md) -- [Create agent group with one tag](agent_groups/create_agent_group_with_one_tag.md) -- [Create agent group with multiple tags](agent_groups/create_agent_group_with_multiple_tags.md) -- [Visualize matching agents](agent_groups/visualize_matching_agents.md) -- [Check agent groups details](agent_groups/check_agent_groups_details.md) -- [Edit an agent group through the details modal](agent_groups/edit_an_agent_group_through_the_details_modal.md) -- [Edit agent group name](agent_groups/edit_agent_group_name.md) -- [Edit agent group description](agent_groups/edit_agent_group_description.md) -- [Edit agent group 
tag](agent_groups/edit_agent_group_tag.md) -- [Remove agent group using correct name](agent_groups/remove_agent_group_using_correct_name.md) -- [Remove agent group using incorrect name](agent_groups/remove_agent_group_using_incorrect_name.md) -- [Run two orb agents on the same port](agents/run_two_orb_agents_on_the_same_port.md) -- [Run two orb agents on different ports](agents/run_two_orb_agents_on_different_ports.md) -- [Edit Agent Group name removing name](agent_groups/edit_agent_group_name_removing_name.md) -- [Edit agent group name](agent_groups/edit_agent_group_name.md) -- [Edit agent group description](agent_groups/edit_agent_group_description.md) -- [Edit Agent Group description removing description](agent_groups/edit_agent_group_description_removing_description.md) -- [Edit Agent Group tags to subscribe agent](agent_groups/edit_agent_group_tags_to_subscribe_agent.md) -- [Edit Agent Group tags to unsubscribe agent](agent_groups/edit_agent_group_tags_to_unsubscribe_agent.md) -- [Edit Agent Group removing tags](agent_groups/edit_agent_group_removing_tags.md) -- [Edit Agent Group name, description and tags](agent_groups/edit_agent_group_name,_description_and_tags.md) - - -## Sinks - -- [Create sink with invalid name (regex)](sinks/create_sink_with_invalid_name_(regex).md) -- [Create sink with duplicate name](sinks/create_sink_with_duplicate_name.md) -- [Create sink with description](sinks/create_sink_with_description.md) -- [Create sink without description](sinks/create_sink_without_description.md) -- [Create sink without tags](sinks/create_sink_without_tags.md) -- [Check if remote host, username and password are required to create a sink](sinks/check_if_remote_host,_username_and_password_are_required_to_create_a_sink.md) -- [Check sink details](sinks/check_sink_details.md) -- [Edit a sink through the details modal](sinks/edit_a_sink_through_the_details_modal.md) -- [Edit sink name](sinks/edit_sink_name.md) -- [Edit sink description](sinks/edit_sink_description.md) -- [Edit sink remote host](sinks/edit_sink_remote_host.md) -- [Edit sink username](sinks/edit_sink_username.md) -- [Edit sink password](sinks/edit_sink_password.md) -- [Edit sink tags](sinks/edit_sink_tags.md) -- [Remove sink using correct name](sinks/remove_sink_using_correct_name.md) -- [Remove sink using incorrect name](sinks/remove_sink_using_incorrect_name.md) - -## Policies - -- [Create policy with invalid name (regex)](policies/create_policy_with_invalid_name_(regex).md) -- [Create policy with no agent provisioned](policies/create_policy_with_no_agent_provisioned.md) -- [Create policy with duplicate name](policies/create_policy_with_duplicate_name.md) -- [Create policy with description](policies/create_policy_with_description.md) -- [Create policy without description](policies/create_policy_without_description.md) -- [Create policy with dhcp handler](policies/create_policy_with_dhcp_handler.md) -- [Create policy with dns handler](policies/create_policy_with_dns_handler.md) -- [Create policy with net handler](policies/create_policy_with_net_handler.md) -- [Create policy with multiple handlers](policies/create_policy_with_multiple_handlers.md) -- [Check policies details](policies/check_policies_details.md) -- [Edit a policy through the details modal](policies/edit_a_policy_through_the_details_modal.md) -- [Edit policy name](policies/edit_policy_name.md) -- [Edit policy host_specification](policies/edit_policy_host_specification.md) -- [Edit policy bpf_filter_expression](policies/edit_policy_bpf_filter_expression.md) -- 
[Edit policy pcap_source](policies/edit_policy_pcap_source.md) -- [Edit policy only_qname_suffix](policies/edit_policy_only_qname_suffix.md) -- [Edit policy only_rcode](policies/edit_policy_only_rcode.md) -- [Edit policy description](policies/edit_policy_description.md) -- [Edit policy handler](policies/edit_policy_handler.md) -- [Remove policy using correct name](policies/remove_policy_using_correct_name.md) -- [Remove policy using incorrect name](policies/remove_policy_using_incorrect_name.md) -- [Create duplicated net policy without insert new name](policies/create_duplicated_net_policy_without_insert_new_name.md) -- [Create duplicated dhcp policy without insert new name](policies/create_duplicated_dhcp_policy_without_insert_new_name.md) -- [Create duplicated dns policy without insert new name](policies/create_duplicated_dns_policy_without_insert_new_name.md) -- [Create 4 duplicated policy with new name](policies/create_4_duplicated_policy_with_new_name.md) -- [Create 3 duplicated dns policy without insert new name and 1 with new name](policies/create_3_duplicated_dns_policy_without_insert_new_name_and_1_with_new_name.md) - -## Datasets - -- [Create dataset with invalid name (regex)](datasets/create_dataset_with_invalid_name_(regex).md) -- [Create dataset](datasets/create_dataset.md) -- [Check datasets details](datasets/check_datasets_details.md) -- [Edit dataset name](datasets/edit_dataset_name.md) -- [Edit dataset sink](datasets/edit_dataset_sink.md) -- [Remove dataset using correct name](datasets/remove_dataset_using_correct_name.md) -- [Remove dataset using incorrect name](datasets/remove_dataset_using_incorrect_name.md) - -## Integration tests - -- [Check if sink is active while scraping metrics](integration/sink_active_while_scraping_metrics.md) -- [Check if sink with invalid credentials becomes active](integration/sink_error_invalid_credentials.md) -- [Provision agent before group (check if agent subscribes to the group)](integration/provision_agent_before_group.md) -- [Provision agent after group (check if agent subscribes to the group)](integration/provision_agent_after_group.md) -- [Provision agent with tag matching existing group linked to a valid dataset](integration/multiple_agents_subscribed_to_a_group.md) -- [Apply multiple policies to a group](integration/apply_multiple_policies.md) -- [Apply multiple policies to a group and remove one policy](integration/remove_one_of_multiple_policies.md) -- [Apply multiple policies to a group and remove one dataset](integration/remove_one_of_multiple_datasets.md) -- [Apply the same policy twice to the agent](integration/apply_policy_twice.md) -- [Remove group (invalid dataset, agent logs)](integration/remove_group.md) -- [Remove sink (invalid dataset, agent logs)](integration/remove_sink.md) -- [Remove policy (invalid dataset, agent logs, heartbeat)](integration/remove_policy.md) -- [Remove dataset (check agent logs, heartbeat)](integration/remove_dataset.md) -- [Remove agent container (logs, agent groups matches)](integration/remove_agent_container.md) -- [Remove agent container force (logs, agent groups matches)](integration/remove_agent_container_force.md) -- [Remove agent (logs, agent groups matches)](integration/remove_agent.md) -- [Subscribe an agent to multiple groups created before agent provisioning](integration/subscribe_an_agent_to_multiple_groups_created_before_agent_provisioning.md) -- [Subscribe an agent to multiple groups created after agent 
provisioning](integration/subscribe_an_agent_to_multiple_groups_created_after_agent_provisioning.md) -- [Agent subscription to group after editing orb agent's tags](integration/agent_subscription_to_group_after_editing_agent's_tags.md) -- [Agent subscription to group with policies after editing orb agent's tags](integration/agent_subscription_to_group_with_policies_after_editing_agent's_tags.md) -- [Edit agent name and apply policies to then](integration/edit_agent_name_and_apply_policies_to_then.md) -- [Insert tags in agents created without tags and apply policies to group matching new tags.md](integration/insert_tags_in_agents_created_without_tags_and_apply_policies_to_group_matching_new_tags.md) -- [Agent unsubscription to group with policies after editing agent group's tags (editing tags after agent provision)](integration/agent_unsubscription_to_group_with_policies_after_editing_agent_group's_tags_editing_tags_after_agent_provision.md) -- [Agent subscription to group with policies after editing agent group's tags (editing tags after agent provision)](integration/agent_subscription_to_group_with_policies_after_editing_agent_group's_tags_editing_tags_after_agent_provision.md) -- [Agent unsubscription to group with policies after editing agent group's tags (editing tags before agent provision)](integration/agent_unsubscription_to_group_with_policies_after_editing_agent_group's_tags_editing_tags_before_agent_provision.md) -- [Agent subscription to group with policies after editing agent group's tags (editing tags before agent provision)](integration/agent_subscription_to_group_with_policies_after_editing_agent_group's_tags_editing_tags_before_agent_provision.md) -- [Agent unsubscription to group with policies after editing orb agent's tags and agent group's tags (editing tags after agent provision)](integration/agent_unsubscription_to_group_with_policies_after_editing_agent's_tags_and_agent_group's_tags_editing_tags_after_agent_provision.md) -- [Agent subscription to group with policies after editing orb agent's tags and agent group's tags (editing tags after agent provision)](integration/agent_subscription_to_group_with_policies_after_editing_agent's_tags_and_agent_group's_tags_editing_tags_after_agent_provision.md) -- [Agent unsubscription to group with policies after editing orb agent's tags and agent group's tags (editing tags before agent provision)](integration/agent_unsubscription_to_group_with_policies_after_editing_agent's_tags_and_agent_group's_tags_editing_tags_before_agent_provision.md) -- [Agent subscription to group with policies after editing orb agent's tags and agent group's tags (editing tags before agent provision)](integration/agent_subscription_to_group_with_policies_after_editing_agent's_tags_and_agent_group's_tags_editing_tags_before_agent_provision.md) -- [Remotely reset an agent with policies](integration/reset_agent_remotely.md) -- [Remotely reset an agent without policies](integration/reset_agent_remotely.md) diff --git a/python-test/docs/sinks/check_if_is_possible_cancel_operations_with_no_change.md b/python-test/docs/sinks/check_if_is_possible_cancel_operations_with_no_change.md deleted file mode 100644 index 3ed43df35..000000000 --- a/python-test/docs/sinks/check_if_is_possible_cancel_operations_with_no_change.md +++ /dev/null @@ -1,24 +0,0 @@ -## Scenario: Check if is possible cancel operations with no change -## Steps: -1 - Create a sink - -- REST API Method: POST -- endpoint: /sinks -- header: {authorization:token} - -2 - On sinks' page 
(`orb.live/pages/sinks`) click on edit button - -3 - Change sinks' name - -4 - Change sink's description and click "next" - -5 - Change sink's remote host - -6 - Change sink's username - -7 - Change sink's password - -8 - Click "back" until return to sinks' page - -## Expected Result: -- No changes must have been applied to the sink diff --git a/python-test/docs/sinks/check_if_remote_host,_username_and_password_are_required_to_create_a_sink.md b/python-test/docs/sinks/check_if_remote_host,_username_and_password_are_required_to_create_a_sink.md deleted file mode 100644 index f8d5c3e9b..000000000 --- a/python-test/docs/sinks/check_if_remote_host,_username_and_password_are_required_to_create_a_sink.md +++ /dev/null @@ -1,49 +0,0 @@ -## Scenario: Check if remote host, username and password are required to create a sink - ---------------------------------------------------------- - -## Without remote host - - -## Steps: -1 - Create a sink without remote host - -- REST API Method: POST -- endpoint: /sinks -- header: {authorization:token} - -## Expected Result: - -- Request must fail with status code 400 (bad request) - --------------------------------------------------------- - -## Without username - - -## Steps: -1 - Create a sink without username - -- REST API Method: POST -- endpoint: /sinks -- header: {authorization:token} - -## Expected Result: - -- Request must fail with status code 400 (bad request) - --------------------------------------------------------- - -## Without password - - -## Steps: -1 - Create a sink without password - -- REST API Method: POST -- endpoint: /sinks -- header: {authorization:token} - -## Expected Result: - -- Request must fail with status code 400 (bad request) \ No newline at end of file diff --git a/python-test/docs/sinks/check_if_total_sinks_on_sinks'_page_is_correct.md b/python-test/docs/sinks/check_if_total_sinks_on_sinks'_page_is_correct.md deleted file mode 100644 index e3753e751..000000000 --- a/python-test/docs/sinks/check_if_total_sinks_on_sinks'_page_is_correct.md +++ /dev/null @@ -1,21 +0,0 @@ -## Scenario: Check if total sinks on sinks' page is correct -## Steps: -1 - Create multiple sinks - -- REST API Method: POST -- endpoint: /sinks -- header: {authorization:token} - -2 - Get all existing sinks - -- REST API Method: GET -- endpoint: /sinks - -3 - On sinks' page (`orb.live/pages/sinks`) check the total number of sinks at the end of the sinks table - -4 - Count the number of existing sinks - -## Expected Result: -- Total sinks on API response, sinks page and the real number must be the same - - diff --git a/python-test/docs/sinks/check_sink_details.md b/python-test/docs/sinks/check_sink_details.md deleted file mode 100644 index 4cb75d8f3..000000000 --- a/python-test/docs/sinks/check_sink_details.md +++ /dev/null @@ -1,19 +0,0 @@ -## Scenario: Check sink details -## Steps: -1 - Create a sink - -- REST API Method: POST -- endpoint: /sinks -- header: {authorization:token} - -2 - Get a sink - -- REST API Method: GET -- endpoint: /sinks/sink_id - -## Expected Result: -- Status code must be 200 and sink name, description, service type, remote host, status, username and tags must be returned on response - - * If a sink never received data, status must be `new` - * If a sink is receiving data, status must be `active` - * If a sink has not received data for more than 30 minutes, status must be `idle` \ No newline at end of file diff --git a/python-test/docs/sinks/create_sink_with_description.md b/python-test/docs/sinks/create_sink_with_description.md 
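The required-fields scenario above boils down to three negative creations, each dropping one of remote host, username and password and expecting 400. A sketch of that loop; only the endpoint, header and expected status come from the scenario, while the body layout (`backend`/`config` keys and the `prometheus` backend value) is an assumption:

```python
# Sketch only: base URL, token and the sink body layout are assumptions.
import requests

ORB_URL = "https://orb.live"                         # assumed base URL
HEADERS = {"Authorization": "<orb-session-token>"}

FULL_CONFIG = {
    "remote_host": "https://prometheus.example.com/api/v1/write",
    "username": "qa-user",
    "password": "qa-password",
}

for missing in ("remote_host", "username", "password"):
    config = {k: v for k, v in FULL_CONFIG.items() if k != missing}
    body = {"name": f"sink-missing-{missing.replace('_', '-')}",
            "backend": "prometheus",                 # assumed backend value
            "config": config}
    resp = requests.post(f"{ORB_URL}/sinks", json=body, headers=HEADERS, timeout=30)
    assert resp.status_code == 400, f"sink without {missing} must be rejected"
```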
deleted file mode 100644 index c09153b7e..000000000 --- a/python-test/docs/sinks/create_sink_with_description.md +++ /dev/null @@ -1,12 +0,0 @@ -## Scenario: Create sink with description -## Steps: - -1 - Create a sink with description - -- REST API Method: POST -- endpoint: /sinks -- header: {authorization:token} - - -## Expected Result: -- Request must have status code 201 (created) and the sink must be created diff --git a/python-test/docs/sinks/create_sink_with_duplicate_name.md b/python-test/docs/sinks/create_sink_with_duplicate_name.md deleted file mode 100644 index 134a49c9e..000000000 --- a/python-test/docs/sinks/create_sink_with_duplicate_name.md +++ /dev/null @@ -1,14 +0,0 @@ -## Scenario: Create sink with duplicate name -## Steps: -1 - Create a sink - -- REST API Method: POST -- endpoint: /sinks -- header: {authorization:token} - -2 - Create another sink using the same sink name - -## Expected Result: -- First request must have status code 201 (created) and one sink must be created on orb -- Second request must fail with status code 409 (conflict) and no other sink must be created (make sure that first sink has not been modified) - diff --git a/python-test/docs/sinks/create_sink_with_invalid_name_(regex).md b/python-test/docs/sinks/create_sink_with_invalid_name_(regex).md deleted file mode 100644 index dc095ece9..000000000 --- a/python-test/docs/sinks/create_sink_with_invalid_name_(regex).md +++ /dev/null @@ -1,14 +0,0 @@ -## Scenario: Create sink with invalid name (regex) -## Steps: -1 - Create a sink using an invalid regex to sink name - -- REST API Method: POST -- endpoint: /sinks -- header: {authorization:token} -- example of invalid regex: - * name starting with non-alphabetic characters - * name with just 1 letter - * space-separated composite name - -## Expected Result: -- Request must fail with status code 400 (bad request) and no sink must be created diff --git a/python-test/docs/sinks/create_sink_with_multiple_tags.md b/python-test/docs/sinks/create_sink_with_multiple_tags.md deleted file mode 100644 index 2a935bc3a..000000000 --- a/python-test/docs/sinks/create_sink_with_multiple_tags.md +++ /dev/null @@ -1,12 +0,0 @@ -## Scenario: Create sink with multiple tags -## Steps: -1 - Create a sink with more than one pair (key:value) of tags - -- REST API Method: POST -- endpoint: /sinks -- header: {authorization:token} - - -## Expected Result: -- Request must have status code 201 (created) and the sink must be created -- Tags for sink just serve to filter the sinks \ No newline at end of file diff --git a/python-test/docs/sinks/create_sink_with_tags.md b/python-test/docs/sinks/create_sink_with_tags.md deleted file mode 100644 index 248ab56ff..000000000 --- a/python-test/docs/sinks/create_sink_with_tags.md +++ /dev/null @@ -1,12 +0,0 @@ -## Scenario: Create sink with tags -## Steps: -1 - Create a sink with one pair (key:value) of tags - -- REST API Method: POST -- endpoint: /sinks -- header: {authorization:token} - - -## Expected Result: -- Request must have status code 201 (created) and the sink must be created -- Tags for sink just serve to filter the sinks diff --git a/python-test/docs/sinks/create_sink_without_description.md b/python-test/docs/sinks/create_sink_without_description.md deleted file mode 100644 index d8936d036..000000000 --- a/python-test/docs/sinks/create_sink_without_description.md +++ /dev/null @@ -1,12 +0,0 @@ -## Scenario: Create sink without description -## Steps: -1 - Create a sink without description - -- REST API Method: POST -- endpoint: /sinks -- 
header: {authorization:token} - - -## Expected Result: -- Request must have status code 201 (created) and the sink must be created -- Tags for sink just serve to filter the sinks \ No newline at end of file diff --git a/python-test/docs/sinks/create_sink_without_tags.md b/python-test/docs/sinks/create_sink_without_tags.md deleted file mode 100644 index c1694dbe0..000000000 --- a/python-test/docs/sinks/create_sink_without_tags.md +++ /dev/null @@ -1,12 +0,0 @@ -## Scenario: Create sink without tags -## Steps: -1 - Create a sink without any pair (key:value) of tags - -- REST API Method: POST -- endpoint: /sinks -- header: {authorization:token} - - -## Expected Result: -- Request must have status code 201 (created) and the sink must be created -- Tags for sink just serve to filter the sinks \ No newline at end of file diff --git a/python-test/docs/sinks/edit_a_sink_through_the_details_modal.md b/python-test/docs/sinks/edit_a_sink_through_the_details_modal.md deleted file mode 100644 index 4f75cb8b9..000000000 --- a/python-test/docs/sinks/edit_a_sink_through_the_details_modal.md +++ /dev/null @@ -1,13 +0,0 @@ -## Scenario: Edit a sink through the details modal -## Steps: -1 - Create a sink - -- REST API Method: POST -- endpoint: /sinks -- header: {authorization:token} - -2 - On sinks' page (`orb.live/pages/sinks`) click on details button -3 - Click on "edit" button - -## Expected Result: -- User should be redirected to this sink's edit page and should be able to make changes \ No newline at end of file diff --git a/python-test/docs/sinks/edit_sink_description.md b/python-test/docs/sinks/edit_sink_description.md deleted file mode 100644 index 48f6c56dc..000000000 --- a/python-test/docs/sinks/edit_sink_description.md +++ /dev/null @@ -1,19 +0,0 @@ -## Scenario: Edit sink description -## Steps: -1 - Create a sink - -- REST API Method: POST -- endpoint: /sinks -- header: {authorization:token} - -2- Edit this sink description - -- REST API Method: PUT -- endpoint: /sinks/sink_id -- header: {authorization:token} - - -## Expected Result: -- Request must have status code 200 (ok) and changes must be applied - - diff --git a/python-test/docs/sinks/edit_sink_name.md b/python-test/docs/sinks/edit_sink_name.md deleted file mode 100644 index dcca3fba5..000000000 --- a/python-test/docs/sinks/edit_sink_name.md +++ /dev/null @@ -1,18 +0,0 @@ -## Scenario: Edit sink name -## Steps: -1 - Create a sink - -- REST API Method: POST -- endpoint: /sinks -- header: {authorization:token} - -2- Edit this sink name - -- REST API Method: PUT -- endpoint: /sinks/sink_id -- header: {authorization:token} - - -## Expected Result: -- Request must have status code 200 (ok) and changes must be applied - \ No newline at end of file diff --git a/python-test/docs/sinks/edit_sink_password.md b/python-test/docs/sinks/edit_sink_password.md deleted file mode 100644 index 051514a58..000000000 --- a/python-test/docs/sinks/edit_sink_password.md +++ /dev/null @@ -1,18 +0,0 @@ -## Scenario: Edit sink password -## Steps: -1 - Create a sink - -- REST API Method: POST -- endpoint: /sinks -- header: {authorization:token} - -2- Edit this sink password - -- REST API Method: PUT -- endpoint: /sinks/sink_id -- header: {authorization:token} - - -## Expected Result: -- Request must have status code 200 (ok) and changes must be applied - \ No newline at end of file diff --git a/python-test/docs/sinks/edit_sink_remote_host.md b/python-test/docs/sinks/edit_sink_remote_host.md deleted file mode 100644 index 6d3b20433..000000000 --- 
a/python-test/docs/sinks/edit_sink_remote_host.md +++ /dev/null @@ -1,18 +0,0 @@ -## Scenario: Edit sink remote host -## Steps: -1 - Create a sink - -- REST API Method: POST -- endpoint: /sinks -- header: {authorization:token} - -2- Edit this sink remote host - -- REST API Method: PUT -- endpoint: /sinks/sink_id -- header: {authorization:token} - - -## Expected Result: -- Request must have status code 200 (ok) and changes must be applied - \ No newline at end of file diff --git a/python-test/docs/sinks/edit_sink_tags.md b/python-test/docs/sinks/edit_sink_tags.md deleted file mode 100644 index fc0566f7d..000000000 --- a/python-test/docs/sinks/edit_sink_tags.md +++ /dev/null @@ -1,18 +0,0 @@ -## Scenario: Edit sink tags -## Steps: -1 - Create a sink - -- REST API Method: POST -- endpoint: /sinks -- header: {authorization:token} - -2- Edit this sink tags - -- REST API Method: PUT -- endpoint: /sinks/sink_id -- header: {authorization:token} - - -## Expected Result: -- Request must have status code 200 (ok) and changes must be applied - \ No newline at end of file diff --git a/python-test/docs/sinks/edit_sink_username.md b/python-test/docs/sinks/edit_sink_username.md deleted file mode 100644 index 3a211dcf1..000000000 --- a/python-test/docs/sinks/edit_sink_username.md +++ /dev/null @@ -1,18 +0,0 @@ -## Scenario: Edit sink username -## Steps: -1 - Create a sink - -- REST API Method: POST -- endpoint: /sinks -- header: {authorization:token} - -2- Edit this sink username - -- REST API Method: PUT -- endpoint: /sinks/sink_id -- header: {authorization:token} - - -## Expected Result: -- Request must have status code 200 (ok) and changes must be applied - \ No newline at end of file diff --git a/python-test/docs/sinks/remove_sink_using_correct_name.md b/python-test/docs/sinks/remove_sink_using_correct_name.md deleted file mode 100644 index 644417022..000000000 --- a/python-test/docs/sinks/remove_sink_using_correct_name.md +++ /dev/null @@ -1,15 +0,0 @@ -## Scenario: Remove sink using correct name -## Steps: -1 - Create a sink - -- REST API Method: POST -- endpoint: /sinks -- header: {authorization:token} - -2 - On agent groups' page (`orb.live/pages/sinks`) click on remove button -3 - Insert the name of the sink correctly on delete modal -4 - Confirm the operation by clicking on "I UNDERSTAND, DELETE THIS SINK" button - -## Expected Result: -- Sink must be deleted - diff --git a/python-test/docs/sinks/remove_sink_using_incorrect_name.md b/python-test/docs/sinks/remove_sink_using_incorrect_name.md deleted file mode 100644 index c1ff8dc5f..000000000 --- a/python-test/docs/sinks/remove_sink_using_incorrect_name.md +++ /dev/null @@ -1,15 +0,0 @@ -## Scenario: Remove sink using incorrect name -## Steps: -1 - Create a sink - -- REST API Method: POST -- endpoint: /sinks -- header: {authorization:token} - -2 - On agent groups' page (`orb.live/pages/sinks`) click on remove button -3 - Insert the name of the sink correctly on delete modal - -## Expected Result: -- Sink must be deleted -- "I UNDERSTAND, DELETE THIS SINK" button must not be enabled -- After user close the deletion modal, sink must not be deleted \ No newline at end of file diff --git a/python-test/docs/sinks/test_sink_filters.md b/python-test/docs/sinks/test_sink_filters.md deleted file mode 100644 index 8a406e8d5..000000000 --- a/python-test/docs/sinks/test_sink_filters.md +++ /dev/null @@ -1,23 +0,0 @@ -## Scenario: Test sink filters -## Steps: -1 - Create multiple sinks - -- REST API Method: POST -- endpoint: /sinks -- header: 
{authorization:token} - -2 - On sinks' page (`orb.live/pages/sinks`) use the filter: - - * Name - * Description - * Type - * Status - * Tags - * Search by - - -## Expected Result: - -- All filters must be working properly - - diff --git a/python-test/docs/smoke.md b/python-test/docs/smoke.md deleted file mode 100644 index 3668791eb..000000000 --- a/python-test/docs/smoke.md +++ /dev/null @@ -1,113 +0,0 @@ -## Smoke tests - -## Login - -- [Request registration of a registered account using registered password username and company](login/request_registration_of_a_registered_account_using_registered_password_username_and_company.md) -- [Request registration of a registered account using registered password and username](login/request_registration_of_a_registered_account_using_registered_password_and_username.md) -- [Request registration of a registered account using registered password and company](login/request_registration_of_a_registered_account_using_registered_password_and_company.md) -- [Request registration of a registered account using registered password](login/request_registration_of_a_registered_account_using_registered_password.md) -- [Request registration of a registered account using unregistered password username and company](login/request_registration_of_a_registered_account_using_unregistered_password_username_and_company.md) -- [Request registration of a registered account using unregistered password and username](login/request_registration_of_a_registered_account_using_unregistered_password_and_username.md) -- [Request registration of a registered account using unregistered password and company](login/request_registration_of_a_registered_account_using_unregistered_password_and_company.md) -- [Request registration of a registered account using unregistered password](login/request_registration_of_a_registered_account_using_unregistered_password.md) -- [Request registration of an unregistered account with valid password and invalid email](login/request_registration_of_an_unregistered_account_with_valid_password_and_invalid_email.md) -- [Request registration of an unregistered account with valid password and valid email](login/request_registration_of_an_unregistered_account_with_valid_password_and_valid_email.md) -- [Request registration of an unregistered account with invalid password and valid email](login/request_registration_of_an_unregistered_account_with_invalid_password_and_valid_email.md) -- [Request registration of an unregistered account with invalid password and invalid email](login/request_registration_of_an_unregistered_account_with_invalid_password_and_invalid_email.md) -- [Check if email and password are required fields](login/check_if_email_and_password_are_required_fields.md) -- [Login with valid credentials](login/login_with_valid_credentials.md) -- [Login with invalid credentials](login/login_with_invalid_credentials.md) -- [Request password with registered email address](login/request_password_with_registered_email_address.md) - - -## Agents - -- [Create agent with one tag](agents/create_agent_with_one_tag.md) -- [Edit agent name](agents/edit_agent_name.md) -- [Edit agent tag](agents/edit_agent_tag.md) -- [Save agent without tag](agents/save_agent_without_tag.md) -- [Insert tags in agents created without tags](agents/insert_tags_in_agents_created_without_tags.md) -- [Remove agent using correct name](agents/remove_agent_using_correct_name.md) -- [Run two orb agents on the same port](agents/run_two_orb_agents_on_the_same_port.md) -- [Run two orb agents on 
different ports](agents/run_two_orb_agents_on_different_ports.md) - -## Agent Groups - -- [Create agent group with description](agent_groups/create_agent_group_with_description.md) -- [Create agent group with one tag](agent_groups/create_agent_group_with_one_tag.md) -- [Edit agent group name](agent_groups/edit_agent_group_name.md) -- [Edit agent group tag](agent_groups/edit_agent_group_tag.md) -- [Remove agent group using correct name](agent_groups/remove_agent_group_using_correct_name.md) -- [Run two orb agents on the same port](agents/run_two_orb_agents_on_the_same_port.md) -- [Run two orb agents on different ports](agents/run_two_orb_agents_on_different_ports.md) -- [Edit Agent Group name removing name](agent_groups/edit_agent_group_name_removing_name.md) -- [Edit agent group name](agent_groups/edit_agent_group_name.md) -- [Edit agent group description](agent_groups/edit_agent_group_description.md) -- [Edit Agent Group description removing description](agent_groups/edit_agent_group_description_removing_description.md) -- [Edit Agent Group tags to subscribe agent](agent_groups/edit_agent_group_tags_to_subscribe_agent.md) -- [Edit Agent Group tags to unsubscribe agent](agent_groups/edit_agent_group_tags_to_unsubscribe_agent.md) -- [Edit Agent Group removing tags](agent_groups/edit_agent_group_removing_tags.md) -- [Edit Agent Group name, description and tags](agent_groups/edit_agent_group_name,_description_and_tags.md) - - -## Sinks - -- [Create sink with description](sinks/create_sink_with_description.md) -- [Create sink without tags](sinks/create_sink_without_tags.md) -- [Remove sink using correct name](sinks/remove_sink_using_correct_name.md) - -## Policies - -- [Create policy with description](policies/create_policy_with_description.md) -- [Create policy with dhcp handler](policies/create_policy_with_dhcp_handler.md) -- [Create policy with dns handler](policies/create_policy_with_dns_handler.md) -- [Create policy with net handler](policies/create_policy_with_net_handler.md) -- [Edit policy handler](policies/edit_policy_handler.md) -- [Remove policy using correct name](policies/remove_policy_using_correct_name.md) -- [Create duplicated net policy without insert new name](policies/create_duplicated_net_policy_without_insert_new_name.md) -- [Create duplicated dhcp policy without insert new name](policies/create_duplicated_dhcp_policy_without_insert_new_name.md) -- [Create duplicated dns policy without insert new name](policies/create_duplicated_dns_policy_without_insert_new_name.md) -- [Create 4 duplicated policy with new name](policies/create_4_duplicated_policy_with_new_name.md) -- [Create 3 duplicated dns policy without insert new name and 1 with new name](policies/create_3_duplicated_dns_policy_without_insert_new_name_and_1_with_new_name.md) - - -## Datasets - -- [Create dataset](datasets/create_dataset.md) -- [Remove dataset using correct name](datasets/remove_dataset_using_correct_name.md) - - -## Integration tests - -- [Check if sink is active while scraping metrics](integration/sink_active_while_scraping_metrics.md) -- [Check if sink with invalid credentials becomes active](integration/sink_error_invalid_credentials.md) -- [Provision agent before group (check if agent subscribes to the group)](integration/provision_agent_before_group.md) -- [Provision agent after group (check if agent subscribes to the group)](integration/provision_agent_after_group.md) -- [Provision agent with tag matching existing group linked to a valid 
dataset](integration/multiple_agents_subscribed_to_a_group.md) -- [Apply multiple policies to a group](integration/apply_multiple_policies.md) -- [Apply multiple policies to a group and remove one policy](integration/remove_one_of_multiple_policies.md) -- [Apply multiple policies to a group and remove one dataset](integration/remove_one_of_multiple_datasets.md) -- [Apply the same policy twice to the agent](integration/apply_policy_twice.md) -- [Remove group (invalid dataset, agent logs)](integration/remove_group.md) -- [Remove sink (invalid dataset, agent logs)](integration/remove_sink.md) -- [Remove policy (invalid dataset, agent logs, heartbeat)](integration/remove_policy.md) -- [Remove dataset (check agent logs, heartbeat)](integration/remove_dataset.md) -- [Remove agent container (logs, agent groups matches)](integration/remove_agent_container.md) -- [Remove agent container force (logs, agent groups matches)](integration/remove_agent_container_force.md) -- [Remove agent (logs, agent groups matches)](integration/remove_agent.md) -- [Subscribe an agent to multiple groups created before agent provisioning](integration/subscribe_an_agent_to_multiple_groups_created_before_agent_provisioning.md) -- [Subscribe an agent to multiple groups created after agent provisioning](integration/subscribe_an_agent_to_multiple_groups_created_after_agent_provisioning.md) -- [Agent subscription to group after editing orb agent's tags](integration/agent_subscription_to_group_after_editing_agent's_tags.md) -- [Agent subscription to group with policies after editing orb agent's tags](integration/agent_subscription_to_group_with_policies_after_editing_agent's_tags.md) -- [Edit agent name and apply policies to then](integration/edit_agent_name_and_apply_policies_to_then.md) -- [Insert tags in agents created without tags and apply policies to group matching new tags.md](integration/insert_tags_in_agents_created_without_tags_and_apply_policies_to_group_matching_new_tags.md) -- [Agent unsubscription to group with policies after editing agent group's tags (editing tags after agent provision)](integration/agent_unsubscription_to_group_with_policies_after_editing_agent_group's_tags_editing_tags_after_agent_provision.md) -- [Agent subscription to group with policies after editing agent group's tags (editing tags after agent provision)](integration/agent_subscription_to_group_with_policies_after_editing_agent_group's_tags_editing_tags_after_agent_provision.md) -- [Agent unsubscription to group with policies after editing agent group's tags (editing tags before agent provision)](integration/agent_unsubscription_to_group_with_policies_after_editing_agent_group's_tags_editing_tags_before_agent_provision.md) -- [Agent subscription to group with policies after editing agent group's tags (editing tags before agent provision)](integration/agent_subscription_to_group_with_policies_after_editing_agent_group's_tags_editing_tags_before_agent_provision.md) -- [Agent unsubscription to group with policies after editing orb agent's tags and agent group's tags (editing tags after agent provision)](integration/agent_unsubscription_to_group_with_policies_after_editing_agent's_tags_and_agent_group's_tags_editing_tags_after_agent_provision.md) -- [Agent subscription to group with policies after editing orb agent's tags and agent group's tags (editing tags after agent provision)](integration/agent_subscription_to_group_with_policies_after_editing_agent's_tags_and_agent_group's_tags_editing_tags_after_agent_provision.md) -- [Agent 
unsubscription to group with policies after editing orb agent's tags and agent group's tags (editing tags before agent provision)](integration/agent_unsubscription_to_group_with_policies_after_editing_agent's_tags_and_agent_group's_tags_editing_tags_before_agent_provision.md)
-- [Agent subscription to group with policies after editing orb agent's tags and agent group's tags (editing tags before agent provision)](integration/agent_subscription_to_group_with_policies_after_editing_agent's_tags_and_agent_group's_tags_editing_tags_before_agent_provision.md)
-- [Remotely reset an agent with policies](integration/reset_agent_remotely.md)
-- [Remotely reset an agent without policies](integration/reset_agent_remotely.md)
diff --git a/python-test/features/integration.feature b/python-test/features/integration.feature
index 127af917f..b271abf38 100644
--- a/python-test/features/integration.feature
+++ b/python-test/features/integration.feature
@@ -2,6 +2,29 @@ Feature: Integration tests
+@private
+Scenario: General smoke test to validate private agent image
+  Given the Orb user has a registered account
+  And the Orb user logs in
+  And that a sink already exists
+  When a new agent is created with 1 orb tag(s)
+  And the agent container is started on an available port
+  And the agent status is online
+  And referred agent is subscribed to 1 group
+  And 2 simple policies are applied to the group
+  Then backends route must be enabled
+  And handlers route must be enabled
+  And taps route must be enabled
+  And inputs route must be enabled
+  And pktvisor state is running
+  And this agent's heartbeat shows that 1 groups are matching the agent
+  And this agent's heartbeat shows that 2 policies are applied and all has status running
+  And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds
+  And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds
+  And referred sink must have active state on response within 120 seconds
+  And 2 dataset(s) have validity valid and 0 have validity invalid in 30 seconds
+
+
 @smoke
 Scenario: Test agents backend routes
   Given the Orb user has a registered account
   And the Orb user logs in
@@ -65,7 +88,7 @@ Scenario: Apply two simple policies to an agent
   Then this agent's heartbeat shows that 2 policies are applied and all has status running
   And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds
   And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds
-  And referred sink must have active state on response within 30 seconds
+  And referred sink must have active state on response within 120 seconds
   And 2 dataset(s) have validity valid and 0 have validity invalid in 30 seconds
@@ -191,7 +214,7 @@ Scenario: Provision agent with tag matching existing group linked to a valid dat
   Then this agent's heartbeat shows that 2 policies are applied and all has status running
   And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds
   And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds
-  And referred sink must have active state on response within 30 seconds
+  And referred sink must have active state on response within 120 seconds
   And 2 dataset(s) have validity valid and 0 have validity invalid in 30 seconds
@@ -209,7 +232,7 @@ Scenario: Provision agent with tag matching existing group with multiple policie
   And this agent's heartbeat shows that 14 policies are applied and all has status running
   And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds
   And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds
-  And referred sink must have active state on response within 30 seconds
+  And referred sink must have active state on response within 120 seconds
   And 14 dataset(s) have validity valid and 0 have validity invalid in 30 seconds
@@ -228,7 +251,7 @@ Scenario: Provision agent with tag matching existing edited group with multiple
   And this agent's heartbeat shows that 14 policies are applied and all has status running
   And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds
   And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds
-  And referred sink must have active state on response within 30 seconds
+  And referred sink must have active state on response within 120 seconds
   And 14 dataset(s) have validity valid and 0 have validity invalid in 30 seconds
@@ -247,11 +270,28 @@ Scenario: Provision agent with tag matching existing group with multiple policie
   And this agent's heartbeat shows that 20 policies are applied and all has status running
   And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds
   And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds
-  And referred sink must have active state on response within 30 seconds
+  And referred sink must have active state on response within 120 seconds
   And 20 dataset(s) have validity valid and 0 have validity invalid in 30 seconds
+@sanity @sink_status_idle
+Scenario: Sink idle after 5 minutes without metrics flow
+  Given the Orb user has a registered account
+  And the Orb user logs in
+  And that an agent with 1 orb tag(s) already exists and is online
+  And pktvisor state is running
+  And referred agent is subscribed to 1 group
+  And this agent's heartbeat shows that 1 groups are matching the agent
+  And that a sink already exists
+  And 2 simple policies are applied to the group
+  And this agent's heartbeat shows that 2 policies are applied and all has status running
+  And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds
+  And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds
+  And referred sink must have active state on response within 120 seconds
+  And 2 dataset(s) have validity valid and 0 have validity invalid in 30 seconds
+  When stop the orb-agent container
+  Then referred sink must have idle state on response after 660 seconds
-@smoke
+@smoke @sink_status_error
 Scenario: Sink with invalid endpoint
   Given the Orb user has a registered account
   And the Orb user logs in
@@ -267,7 +307,7 @@ Scenario: Sink with invalid endpoint
   And the
container logs should contain the message "managing agent policy from core" within 30 seconds And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And the container logs should contain the message "scraped metrics for policy" within 180 seconds - And referred sink must have error state on response within 30 seconds + And referred sink must have error state on response within 120 seconds And 4 dataset(s) have validity valid and 0 have validity invalid in 30 seconds @@ -332,7 +372,7 @@ Scenario: Unapplying policies that failed by removing group And 0 dataset(s) have validity valid and 4 have validity invalid in 30 seconds -@smoke +@smoke @sink_status_error Scenario: Sink with invalid username Given the Orb user has a registered account And the Orb user logs in @@ -348,11 +388,11 @@ Scenario: Sink with invalid username And this agent's heartbeat shows that 4 policies are applied and all has status running And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And the container logs should contain the message "scraped metrics for policy" within 180 seconds - And referred sink must have error state on response within 30 seconds + And referred sink must have error state on response within 120 seconds And 4 dataset(s) have validity valid and 0 have validity invalid in 30 seconds -@smoke +@smoke @sink_status_error Scenario: Sink with invalid password Given the Orb user has a registered account And the Orb user logs in @@ -368,7 +408,7 @@ Scenario: Sink with invalid password And the container logs should contain the message "managing agent policy from core" within 30 seconds And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And the container logs should contain the message "scraped metrics for policy" within 180 seconds - And referred sink must have error state on response within 30 seconds + And referred sink must have error state on response within 120 seconds And 4 dataset(s) have validity valid and 0 have validity invalid in 30 seconds @@ -881,7 +921,7 @@ Scenario: Create duplicated policy Then this agent's heartbeat shows that 2 policies are applied and all has status running And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds And 2 dataset(s) have validity valid and 0 have validity invalid in 30 seconds @@ -904,7 +944,7 @@ Scenario: Remove agent (check dataset) And last container created is exited after 120 seconds And 2 dataset(s) have validity valid and 0 have validity invalid in 30 seconds -@smoke +@smoke @sink_status_error Scenario: Edit sink active and use invalid remote host Given the Orb user has a registered account And the Orb user logs in @@ -919,13 +959,13 @@ Scenario: Edit sink active and use invalid remote host And this agent's heartbeat shows that 10 policies are applied and all has status running And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And the container logs that were output after all policies have been applied contain the message "scraped 
metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds And the sink remote host is edited and an invalid one is used Then referred sink must have error state on response within 120 seconds And 10 dataset(s) have validity valid and 0 have validity invalid in 30 seconds -@smoke +@smoke @sink_status_error Scenario: Edit sink active and use invalid username Given the Orb user has a registered account And the Orb user logs in @@ -940,13 +980,13 @@ Scenario: Edit sink active and use invalid username And this agent's heartbeat shows that 10 policies are applied and all has status running And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds And the sink username is edited and an invalid one is used Then referred sink must have error state on response within 120 seconds And 10 dataset(s) have validity valid and 0 have validity invalid in 30 seconds -@smoke +@smoke @sink_status_error Scenario: Edit sink active and use invalid password Given the Orb user has a registered account And the Orb user logs in @@ -961,13 +1001,13 @@ Scenario: Edit sink active and use invalid password And this agent's heartbeat shows that 10 policies are applied and all has status running And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds And the sink password is edited and an invalid one is used Then referred sink must have error state on response within 120 seconds And 10 dataset(s) have validity valid and 0 have validity invalid in 30 seconds -@smoke +@smoke @sink_status_error Scenario: Edit sink with invalid username and use valid one Given the Orb user has a registered account And the Orb user logs in @@ -982,14 +1022,14 @@ Scenario: Edit sink with invalid username and use valid one And this agent's heartbeat shows that 4 policies are applied and all has status running And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have error state on response within 30 seconds + And referred sink must have error state on response within 120 seconds And 4 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And the sink username is edited and an valid one is used Then referred sink must have active state on response within 120 seconds And 4 dataset(s) have validity valid and 0 have validity invalid in 30 seconds -@smoke +@smoke @sink_status_error Scenario: Edit sink with password and use valid one Given the Orb user has a registered 
account
   And the Orb user logs in
@@ -1004,7 +1044,7 @@ Scenario: Edit sink with password and use valid one
   When this agent's heartbeat shows that 4 policies are applied and all has status running
   And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds
   And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds
-  And referred sink must have error state on response within 30 seconds
+  And referred sink must have error state on response within 120 seconds
   And 4 dataset(s) have validity valid and 0 have validity invalid in 30 seconds
   And the sink password is edited and an valid one is used
   Then referred sink must have active state on response within 120 seconds
diff --git a/python-test/features/integration_config_file.feature b/python-test/features/integration_config_file.feature
index a1dbb36bc..e9bc88426 100644
--- a/python-test/features/integration_config_file.feature
+++ b/python-test/features/integration_config_file.feature
@@ -1,6 +1,26 @@
 @integration_config_files @AUTORETRY
 Feature: Integration tests using agent provided via config file
+
+@private
+Scenario: General smoke test to validate private agent image - using configuration files
+  Given the Orb user has a registered account
+  And the Orb user logs in
+  And that a sink already exists
+  When an agent(input_type:pcap, settings: {"iface":"default"}) is self-provisioned via a configuration file on port available with 3 agent tags and has status online. [Overwrite default: False. Paste only file: True]
+  And pktvisor state is running
+  And 1 Agent Group(s) is created with all tags contained in the agent
+  And 3 simple policies same input_type as created via config file are applied to the group
+  Then 3 dataset(s) have validity valid and 0 have validity invalid in 30 seconds
+  And the container logs should contain the message "completed RPC subscription to group" within 30 seconds
+  And this agent's heartbeat shows that 1 groups are matching the agent
+  And this agent's heartbeat shows that 3 policies are applied and all has status running
+  And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds
+  And referred sink must have active state on response within 120 seconds
+  And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds
+  And remove the agent .yaml generated on each scenario
+
+
 ########### provisioning agents without specify pktvisor configs on backend
 @smoke @config_file @pktvisor_configs
@@ -18,7 +38,7 @@ Scenario: provisioning agent without specify pktvisor binary path and path to co
   And the container logs should contain the message "completed RPC subscription to group" within 30 seconds
   And this agent's heartbeat shows that 3 policies are applied and all has status running
   And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds
-  And referred sink must have active state on response within 30 seconds
+  And referred sink must have active state on response within 120 seconds
   And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds
   And remove the agent .yaml generated on each scenario
@@ -38,7 +58,7 @@
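The step edited throughout these hunks, "referred sink must have active state on response within N seconds", reads as a poll-until-deadline check against the sinks API, so the bump from 30 to 120 seconds only widens the allowed wait. A minimal behave-style sketch of how such a step could be implemented follows; the `wait_for_sink_state` helper, the `/api/v1/sinks/{id}` path, the bearer-token header and the `context` attributes are assumptions made for illustration, not the repository's actual step code.

```python
# Hypothetical sketch of a sink-status polling step (not the repository's real step definition).
from time import sleep, time

import requests
from behave import then


def wait_for_sink_state(orb_url, token, sink_id, expected_state, timeout_seconds, poll_interval=5):
    """Poll the (assumed) sinks endpoint until the sink reports the expected state or the deadline expires."""
    deadline = time() + timeout_seconds
    last_state = None
    while time() < deadline:
        response = requests.get(
            f"{orb_url}/api/v1/sinks/{sink_id}",           # endpoint path is an assumption
            headers={"Authorization": f"Bearer {token}"},  # auth scheme is an assumption
            timeout=10,
        )
        response.raise_for_status()
        last_state = response.json().get("state")
        if last_state == expected_state:
            return last_state
        sleep(poll_interval)
    raise AssertionError(
        f"sink {sink_id} did not reach state '{expected_state}' within {timeout_seconds}s "
        f"(last observed state: {last_state})"
    )


@then("referred sink must have {state} state on response within {timeout:d} seconds")
def check_sink_state_within(context, state, timeout):
    # context.orb_url, context.token and context.sink are assumed to be set by earlier steps.
    wait_for_sink_state(context.orb_url, context.token, context.sink["id"], state, timeout)
```

Under this reading, the 30 to 120 second edits in this patch only change the timeout argument handed to the polling helper; the polling logic itself stays the same.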
Scenario: provisioning agent without specify pktvisor binary path (config file - And the container logs should contain the message "completed RPC subscription to group" within 30 seconds And this agent's heartbeat shows that 3 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And remove the agent .yaml generated on each scenario @@ -58,7 +78,7 @@ Scenario: provisioning agent without specify pktvisor path to config file (confi And the container logs should contain the message "completed RPC subscription to group" within 30 seconds And this agent's heartbeat shows that 3 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And remove the agent .yaml generated on each scenario @@ -78,7 +98,7 @@ Scenario: provisioning agent without specify pktvisor binary path and path to co And the container logs should contain the message "completed RPC subscription to group" within 30 seconds And this agent's heartbeat shows that 3 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And remove the agent .yaml generated on each scenario @@ -100,7 +120,7 @@ Scenario: provisioning agent without specify pktvisor binary path (config file - And the container logs should contain the message "completed RPC subscription to group" within 30 seconds And this agent's heartbeat shows that 3 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And remove the agent .yaml generated on each scenario @@ -122,7 +142,7 @@ Scenario: provisioning agent without specify pktvisor path to config file (confi And the container logs should contain the message "completed RPC subscription to group" within 30 seconds And this agent's heartbeat shows that 3 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" 
referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And remove the agent .yaml generated on each scenario @@ -141,7 +161,7 @@ Scenario: tap_selector - any - matching 0 of all tags from an agent Then 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And the container logs should contain the message "completed RPC subscription to group" within 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent - And this agent's heartbeat shows that 1 policies are applied and all has status failed_to_apply + And this agent's heartbeat shows that 1 policies are applied and all has status no_tap_match And the policy application error details must show that 422 no tap match found for specified 'input.tap_selector' tags And remove the agent .yaml generated on each scenario @@ -159,7 +179,7 @@ Scenario: tap_selector - any - matching 1 of all tags from an agent And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And remove the agent .yaml generated on each scenario @@ -177,7 +197,7 @@ Scenario: tap_selector - any - matching 1 of all tags (plus 1 random tag) from a And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And remove the agent .yaml generated on each scenario @@ -193,7 +213,7 @@ Scenario: tap_selector - all - matching 0 of all tags from an agent Then 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And the container logs should contain the message "completed RPC subscription to group" within 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent - And this agent's heartbeat shows that 1 policies are applied and all has status failed_to_apply + And this agent's heartbeat shows that 1 policies are applied and all has status no_tap_match And the policy application error details must show that 422 no tap match found for specified 'input.tap_selector' tags And remove the agent .yaml generated on each scenario @@ -211,7 +231,7 @@ Scenario: tap_selector - all - matching 1 of all tags from an agent And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And the container logs that 
were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And remove the agent .yaml generated on each scenario @@ -230,7 +250,7 @@ Scenario: tap_selector - all - matching all tags from an agent And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And remove the agent .yaml generated on each scenario @@ -250,7 +270,7 @@ Scenario: agent pcap with only agent tags subscription to a group with policies And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 3 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And remove the agent .yaml generated on each scenario @@ -270,7 +290,7 @@ Scenario: agent pcap with only agent tags subscription to a group with policies And the container logs should contain the message "completed RPC subscription to group" within 30 seconds And this agent's heartbeat shows that 3 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And remove the agent .yaml generated on each scenario @@ -290,7 +310,7 @@ Scenario: agent pcap with mixed tags subscription to a group with policies creat And the container logs should contain the message "completed RPC subscription to group" within 30 seconds And this agent's heartbeat shows that 3 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And remove the agent .yaml generated on each scenario @@ -310,7 +330,7 @@ Scenario: 
agent pcap with mixed tags subscription to a group with policies creat And the container logs should contain the message "completed RPC subscription to group" within 30 seconds And this agent's heartbeat shows that 3 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And remove the agent .yaml generated on each scenario @@ -329,7 +349,7 @@ Scenario: agent pcap with only agent tags subscription to a group with policies And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 3 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And remove the agent .yaml generated on each scenario @@ -349,7 +369,7 @@ Scenario: agent pcap with only agent tags subscription to a group with policies And the container logs should contain the message "completed RPC subscription to group" within 30 seconds And this agent's heartbeat shows that 3 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And remove the agent .yaml generated on each scenario @@ -369,7 +389,7 @@ Scenario: agent pcap with mixed tags subscription to a group with policies creat And the container logs should contain the message "completed RPC subscription to group" within 30 seconds And this agent's heartbeat shows that 3 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And remove the agent .yaml generated on each scenario @@ -389,7 +409,7 @@ Scenario: agent pcap with mixed tags subscription to a group with policies creat And the container logs should contain the message "completed RPC subscription to group" within 30 seconds And this agent's heartbeat shows that 3 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 
seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And remove the agent .yaml generated on each scenario @@ -410,7 +430,7 @@ Scenario: agent flow with only agent tags subscription to a group with policies And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 3 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And remove the agent .yaml generated on each scenario @@ -430,7 +450,7 @@ Scenario: agent flow with only agent tags subscription to a group with policies And the container logs should contain the message "completed RPC subscription to group" within 30 seconds And this agent's heartbeat shows that 3 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And remove the agent .yaml generated on each scenario @@ -450,7 +470,7 @@ Scenario: agent flow with mixed tags subscription to a group with policies creat And the container logs should contain the message "completed RPC subscription to group" within 30 seconds And this agent's heartbeat shows that 3 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And remove the agent .yaml generated on each scenario @@ -470,7 +490,7 @@ Scenario: agent flow with mixed tags subscription to a group with policies creat And the container logs should contain the message "completed RPC subscription to group" within 30 seconds And this agent's heartbeat shows that 3 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And remove the agent .yaml generated on each scenario @@ -489,7 +509,7 @@ Scenario: agent flow with only agent tags subscription to a group with policies And this agent's 
heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 3 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And remove the agent .yaml generated on each scenario @@ -509,7 +529,7 @@ Scenario: agent flow with only agent tags subscription to a group with policies And the container logs should contain the message "completed RPC subscription to group" within 30 seconds And this agent's heartbeat shows that 3 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And remove the agent .yaml generated on each scenario @@ -529,7 +549,7 @@ Scenario: agent flow with mixed tags subscription to a group with policies creat And the container logs should contain the message "completed RPC subscription to group" within 30 seconds And this agent's heartbeat shows that 3 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And remove the agent .yaml generated on each scenario @@ -549,7 +569,7 @@ Scenario: agent flow with mixed tags subscription to a group with policies creat And the container logs should contain the message "completed RPC subscription to group" within 30 seconds And this agent's heartbeat shows that 3 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And remove the agent .yaml generated on each scenario @@ -571,7 +591,7 @@ Scenario: agent dnstap with only agent tags subscription to a group with policie And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 3 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on 
response within 120 seconds And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And remove the agent .yaml generated on each scenario @@ -591,7 +611,7 @@ Scenario: agent dnstap with only agent tags subscription to a group with policie And the container logs should contain the message "completed RPC subscription to group" within 30 seconds And this agent's heartbeat shows that 3 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And remove the agent .yaml generated on each scenario @@ -611,7 +631,7 @@ Scenario: agent dnstap with mixed tags subscription to a group with policies cre And the container logs should contain the message "completed RPC subscription to group" within 30 seconds And this agent's heartbeat shows that 3 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And remove the agent .yaml generated on each scenario @@ -631,7 +651,7 @@ Scenario: agent dnstap with mixed tags subscription to a group with policies cre And the container logs should contain the message "completed RPC subscription to group" within 30 seconds And this agent's heartbeat shows that 3 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And remove the agent .yaml generated on each scenario @@ -650,7 +670,7 @@ Scenario: agent dnstap with only agent tags subscription to a group with policie And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 3 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And remove the agent .yaml generated on each scenario @@ -670,7 +690,7 @@ Scenario: agent dnstap with only agent tags subscription to a group with policie And the container logs should contain the message "completed RPC subscription to group" within 30 seconds And this agent's heartbeat shows 
that 3 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And remove the agent .yaml generated on each scenario @@ -690,7 +710,7 @@ Scenario: agent dnstap with mixed tags subscription to a group with policies cre And the container logs should contain the message "completed RPC subscription to group" within 30 seconds And this agent's heartbeat shows that 3 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And remove the agent .yaml generated on each scenario @@ -710,7 +730,7 @@ Scenario: agent dnstap with mixed tags subscription to a group with policies cre And the container logs should contain the message "completed RPC subscription to group" within 30 seconds And this agent's heartbeat shows that 3 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And remove the agent .yaml generated on each scenario @@ -732,7 +752,7 @@ Scenario: agent netprobe with only agent tags subscription to a group with polic And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 3 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And remove the agent .yaml generated on each scenario @@ -752,7 +772,7 @@ Scenario: agent netprobe with only agent tags subscription to a group with polic And the container logs should contain the message "completed RPC subscription to group" within 30 seconds And this agent's heartbeat shows that 3 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds And the container logs contain the message "policy applied 
successfully" referred to each policy within 30 seconds And remove the agent .yaml generated on each scenario @@ -772,7 +792,7 @@ Scenario: agent netprobe with mixed tags subscription to a group with policies c And the container logs should contain the message "completed RPC subscription to group" within 30 seconds And this agent's heartbeat shows that 3 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And remove the agent .yaml generated on each scenario @@ -792,7 +812,7 @@ Scenario: agent netprobe with mixed tags subscription to a group with policies c And the container logs should contain the message "completed RPC subscription to group" within 30 seconds And this agent's heartbeat shows that 3 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And remove the agent .yaml generated on each scenario @@ -812,7 +832,7 @@ Scenario: agent netprobe with only agent tags subscription to a group with polic And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 3 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And remove the agent .yaml generated on each scenario @@ -833,7 +853,7 @@ Scenario: agent netprobe with only agent tags subscription to a group with polic And the container logs should contain the message "completed RPC subscription to group" within 30 seconds And this agent's heartbeat shows that 3 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And remove the agent .yaml generated on each scenario @@ -854,7 +874,7 @@ Scenario: agent netprobe with mixed tags subscription to a group with policies c And the container logs should contain the message "completed RPC subscription to group" within 30 seconds And this agent's heartbeat shows that 3 policies are applied and all has status running And the container logs that were 
output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And remove the agent .yaml generated on each scenario @@ -875,6 +895,6 @@ Scenario: agent netprobe with mixed tags subscription to a group with policies c And the container logs should contain the message "completed RPC subscription to group" within 30 seconds And this agent's heartbeat shows that 3 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And remove the agent .yaml generated on each scenario diff --git a/python-test/features/metrics.feature b/python-test/features/metrics.feature index 098d45c3d..2fcc5dcbd 100644 --- a/python-test/features/metrics.feature +++ b/python-test/features/metrics.feature @@ -16,7 +16,7 @@ Scenario: netprobe handler with default metric groups configuration And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for netprobe handler And remove the agent .yaml generated on each scenario @@ -34,7 +34,7 @@ Scenario: netprobe handler with all metric groups enabled And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for netprobe handler And remove the agent .yaml generated on each scenario @@ -52,7 +52,7 @@ Scenario: netprobe handler with all metric groups disabled And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for netprobe handler And remove the agent .yaml generated on each scenario @@ -70,7 +70,7 @@ Scenario: netprobe handler with only counters 
metric groups enabled And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for netprobe handler And remove the agent .yaml generated on each scenario @@ -88,7 +88,7 @@ Scenario: netprobe handler with only quantiles metric groups enabled And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for netprobe handler And remove the agent .yaml generated on each scenario @@ -106,7 +106,7 @@ Scenario: netprobe handler with only histograms metric groups enabled And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for netprobe handler And remove the agent .yaml generated on each scenario @@ -124,7 +124,7 @@ Scenario: netprobe handler with counters and histograms metric groups enabled an And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for netprobe handler And remove the agent .yaml generated on each scenario @@ -142,7 +142,7 @@ Scenario: netprobe handler with counters and quantiles metric groups enabled and And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for netprobe handler And remove the agent .yaml generated on each scenario @@ -160,7 +160,7 @@ Scenario: netprobe handler with histograms and quantiles metric groups enabled a And this agent's heartbeat shows that 1 groups are matching 
the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for netprobe handler And remove the agent .yaml generated on each scenario @@ -182,7 +182,7 @@ Scenario: flow handler type netflow with default metric groups configuration And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario @@ -203,7 +203,7 @@ Scenario: flow handler type netflow with all metric groups enabled And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario @@ -224,7 +224,7 @@ Scenario: flow handler type netflow with all metric groups disabled And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario @@ -245,7 +245,7 @@ Scenario: flow handler type netflow with only cardinality metric groups enabled And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have 
active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario @@ -266,7 +266,7 @@ Scenario: flow handler type netflow with only counters metric groups enabled And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario @@ -287,7 +287,7 @@ Scenario: flow handler type netflow with only by_packets metric groups enabled And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario @@ -308,7 +308,7 @@ Scenario: flow handler type netflow with only by_bytes metric groups enabled And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario @@ -329,7 +329,7 @@ Scenario: flow handler type netflow with only top_geo metric groups enabled And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario @@ -350,7 +350,7 @@ Scenario: flow handler type netflow with only 
conversations metric groups enable And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario @@ -371,7 +371,7 @@ Scenario: flow handler type netflow with only top_ports metric groups enabled And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario @@ -392,7 +392,7 @@ Scenario: flow handler type netflow with only top_ips_ports metric groups enable And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario @@ -413,7 +413,7 @@ Scenario: flow handler type netflow with only top_interfaces metric groups enabl And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario @@ -434,7 +434,7 @@ Scenario: flow handler type netflow with only top_ips metric groups enabled And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on 
the created virtual switch And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario @@ -457,7 +457,7 @@ Scenario: flow handler type sflow with default metric groups configuration And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario @@ -478,7 +478,7 @@ Scenario: flow handler type sflow with all metric groups enabled And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario @@ -499,7 +499,7 @@ Scenario: flow handler type sflow with all metric groups disabled And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario @@ -520,7 +520,7 @@ Scenario: flow handler type sflow with only cardinality metric groups enabled And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on 
response within 120 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario @@ -541,7 +541,7 @@ Scenario: flow handler type sflow with only counters metric groups enabled And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario @@ -562,7 +562,7 @@ Scenario: flow handler type sflow with only by_packets metric groups enabled And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario @@ -583,7 +583,7 @@ Scenario: flow handler type sflow with only by_bytes metric groups enabled And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario @@ -604,7 +604,7 @@ Scenario: flow handler type sflow with only top_geo metric groups enabled And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario @@ -625,7 +625,7 @@ Scenario: flow handler type sflow with only conversations metric groups enabled And this agent's heartbeat shows that 1 policies are applied and 
all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario @@ -646,7 +646,7 @@ Scenario: flow handler type sflow with only top_ports metric groups enabled And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario @@ -667,7 +667,7 @@ Scenario: flow handler type sflow with only top_ips_ports metric groups enabled And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario @@ -688,7 +688,7 @@ Scenario: flow handler type sflow with only top_interfaces metric groups enabled And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario @@ -709,7 +709,7 @@ Scenario: flow handler type sflow with only top_ips metric groups enabled And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch And the container logs that were output after all policies have been applied 
contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario @@ -731,7 +731,7 @@ Scenario: pcap handler with default metric groups configuration And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for pcap handler And remove the agent .yaml generated on each scenario @@ -751,7 +751,7 @@ Scenario: pcap handler with all metric groups enabled And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for pcap handler And remove the agent .yaml generated on each scenario @@ -771,7 +771,7 @@ Scenario: pcap handler with all metric groups disabled And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for pcap handler And remove the agent .yaml generated on each scenario @@ -793,7 +793,7 @@ Scenario: bgp handler with default metric groups configuration And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data bgp.pcap on the created virtual interface And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for bgp handler And remove the agent .yaml generated on each scenario @@ -813,7 +813,7 @@ Scenario: bgp handler with all metric groups enabled And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data 
bgp.pcap on the created virtual interface And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for bgp handler And remove the agent .yaml generated on each scenario @@ -833,7 +833,7 @@ Scenario: bgp handler with all metric groups disabled And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data bgp.pcap on the created virtual interface And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for bgp handler And remove the agent .yaml generated on each scenario @@ -855,7 +855,7 @@ Scenario: dhcp handler with default metric groups configuration And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual interface And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for dhcp handler And remove the agent .yaml generated on each scenario @@ -875,7 +875,7 @@ Scenario: dhcp handler with all metric groups enabled And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual interface And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for dhcp handler And remove the agent .yaml generated on each scenario @@ -895,12 +895,12 @@ Scenario: dhcp handler with all metric groups disabled And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual interface And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for dhcp handler And remove the agent .yaml generated on each scenario -#### net +#### net v1.0 @sanity @metric_groups @metrics_net 
@root @mocked_interface Scenario: net handler with default metric groups configuration @@ -917,7 +917,7 @@ Scenario: net handler with default metric groups configuration And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, ipfix.pcap, ecmp.pcap, ipfix.pcap, nf9.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for net handler And remove the agent .yaml generated on each scenario @@ -937,7 +937,7 @@ Scenario: net handler with all metric groups enabled And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, ipfix.pcap, ecmp.pcap, ipfix.pcap, nf9.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for net handler And remove the agent .yaml generated on each scenario @@ -957,7 +957,7 @@ Scenario: net handler with all metric groups disabled And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, ipfix.pcap, ecmp.pcap, ipfix.pcap, nf9.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for net handler And remove the agent .yaml generated on each scenario @@ -977,7 +977,7 @@ Scenario: net handler with only cardinality metric groups enabled And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, ipfix.pcap, ecmp.pcap, ipfix.pcap, nf9.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for net handler And remove the agent .yaml generated on each scenario @@ -997,7 +997,7 @@ 
Scenario: net handler with only counters metric groups enabled And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, ipfix.pcap, ecmp.pcap, ipfix.pcap, nf9.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for net handler And remove the agent .yaml generated on each scenario @@ -1017,7 +1017,7 @@ Scenario: net handler with only top_geo metric groups enabled And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, ipfix.pcap, ecmp.pcap, ipfix.pcap, nf9.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for net handler And remove the agent .yaml generated on each scenario @@ -1037,12 +1037,12 @@ Scenario: net handler with only top_ips metric groups enabled And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, ipfix.pcap, ecmp.pcap, ipfix.pcap, nf9.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for net handler And remove the agent .yaml generated on each scenario -#### dns +#### dns v1.0 @sanity @metric_groups @metrics_dns @root @mocked_interface Scenario: dns handler with default metric groups configuration @@ -1059,7 +1059,7 @@ Scenario: dns handler with default metric groups configuration And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for dns handler And remove the agent .yaml generated on each scenario 
@@ -1079,7 +1079,7 @@ Scenario: dns handler with all metric groups enabled And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for dns handler And remove the agent .yaml generated on each scenario @@ -1099,7 +1099,7 @@ Scenario: dns handler with all metric groups disabled And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for dns handler And remove the agent .yaml generated on each scenario @@ -1119,7 +1119,7 @@ Scenario: dns handler with only top_ecs metric groups enabled And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for dns handler And remove the agent .yaml generated on each scenario @@ -1139,7 +1139,7 @@ Scenario: dns handler with only top_qnames_details metric groups enabled And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for dns handler And remove the agent .yaml generated on each scenario @@ -1159,7 +1159,7 @@ Scenario: dns handler with only cardinality metric groups enabled And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, 
dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for dns handler And remove the agent .yaml generated on each scenario @@ -1179,7 +1179,7 @@ Scenario: dns handler with only counters metric groups enabled And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for dns handler And remove the agent .yaml generated on each scenario @@ -1199,7 +1199,7 @@ Scenario: dns handler with only dns_transaction metric groups enabled And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for dns handler And remove the agent .yaml generated on each scenario @@ -1219,7 +1219,7 @@ Scenario: dns handler with only top_qnames metric groups enabled And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for dns handler And remove the agent .yaml generated on each scenario @@ -1239,6 +1239,430 @@ Scenario: dns handler with only top_ports metric groups enabled And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" 
referred to each applied policy within 180 seconds - And referred sink must have active state on response within 30 seconds + And referred sink must have active state on response within 120 seconds Then metrics must be correctly generated for dns handler + And remove the agent .yaml generated on each scenario + + +#### dns v2.0 + +@sanity @metric_groups @metrics_dns_v2 @root @mocked_interface +Scenario: dns handler with default metric groups configuration (v2) + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: None + When an agent(input_type:pcap, settings: {"iface":"mocked"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a dns policy pcap with tap_selector matching all tag(s) of the tap from an agent, default metric_groups enabled, default metric_groups disabled and settings: {"require_version":"2.0"} is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And referred sink must have active state on response within 120 seconds + Then metrics must be correctly generated for dns-v2 handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_dns_v2 @root @mocked_interface +Scenario: dns handler with all metric groups enabled (v2) + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: None + When an agent(input_type:pcap, settings: {"iface":"mocked"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a dns policy pcap with tap_selector matching all tag(s) of the tap from an agent, all metric_groups enabled, none metric_groups disabled and settings: {"require_version":"2.0"} is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And referred sink must have active state on response within 120 seconds + Then metrics must be correctly generated for dns-v2 handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_dns_v2 @root @mocked_interface +Scenario: dns handler with all metric groups disabled (v2) + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: None + When an agent(input_type:pcap, settings: {"iface":"mocked"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a dns policy pcap with tap_selector matching all tag(s) of the tap from an agent, none metric_groups enabled, all metric_groups disabled and settings: {"require_version":"2.0"} is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And referred sink must have active state on response within 120 seconds + Then metrics must be correctly generated for dns-v2 handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_dns_v2 @root @mocked_interface +Scenario: dns handler with only top_ecs metric groups enabled (v2) + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: None + When an agent(input_type:pcap, settings: {"iface":"mocked"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a dns policy pcap with tap_selector matching all tag(s) of the tap from an agent, top_ecs metric_groups enabled, cardinality, counters, top_qnames, top_ports, top_size, xact_times, quantiles, top_qtypes, top_rcodes metric_groups disabled and settings: {"require_version":"2.0"} is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And referred sink must have active state on response within 120 seconds + Then metrics must be correctly generated for dns-v2 handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_dns_v2 @root @mocked_interface +Scenario: dns handler with only top_ports metric groups enabled (v2) + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: None + When an agent(input_type:pcap, settings: {"iface":"mocked"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a dns policy pcap with tap_selector matching all tag(s) of the tap from an agent, top_ports metric_groups enabled, top_ecs, cardinality, counters, top_qnames, top_size, xact_times, quantiles, top_qtypes, top_rcodes metric_groups disabled and settings: {"require_version":"2.0"} is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And referred sink must have active state on response within 120 seconds + Then metrics must be correctly generated for dns-v2 handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_dns_v2 @root @mocked_interface +Scenario: dns handler with only top_size metric groups enabled (v2) + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: None + When an agent(input_type:pcap, settings: {"iface":"mocked"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a dns policy pcap with tap_selector matching all tag(s) of the tap from an agent, top_size metric_groups enabled, top_ecs, cardinality, counters, top_qnames, top_ports, xact_times, quantiles, top_qtypes, top_rcodes metric_groups disabled and settings: {"require_version":"2.0"} is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And referred sink must have active state on response within 120 seconds + Then metrics must be correctly generated for dns-v2 handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_dns_v2 @root @mocked_interface +Scenario: dns handler with only xact_times metric groups enabled (v2) + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: None + When an agent(input_type:pcap, settings: {"iface":"mocked"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a dns policy pcap with tap_selector matching all tag(s) of the tap from an agent, xact_times metric_groups enabled, top_ecs, cardinality, counters, top_qnames, top_ports, top_size, quantiles, top_qtypes, top_rcodes metric_groups disabled and settings: {"require_version":"2.0"} is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And referred sink must have active state on response within 120 seconds + Then metrics must be correctly generated for dns-v2 handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_dns_v2 @root @mocked_interface +Scenario: dns handler with only cardinality metric groups enabled (v2) + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: None + When an agent(input_type:pcap, settings: {"iface":"mocked"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a dns policy pcap with tap_selector matching all tag(s) of the tap from an agent, cardinality metric_groups enabled, top_ecs, counters, top_qnames, top_ports, top_size, xact_times, quantiles, top_qtypes, top_rcodes metric_groups disabled and settings: {"require_version":"2.0"} is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And referred sink must have active state on response within 120 seconds + Then metrics must be correctly generated for dns-v2 handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_dns_v2 @root @mocked_interface +Scenario: dns handler with only counters metric groups enabled (v2) + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: None + When an agent(input_type:pcap, settings: {"iface":"mocked"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a dns policy pcap with tap_selector matching all tag(s) of the tap from an agent, counters metric_groups enabled, top_ecs, cardinality, top_qnames, top_ports, top_size, xact_times, quantiles, top_qtypes, top_rcodes metric_groups disabled and settings: {"require_version":"2.0"} is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And referred sink must have active state on response within 120 seconds + Then metrics must be correctly generated for dns-v2 handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_dns_v2 @root @mocked_interface +Scenario: dns handler with only top_qnames metric groups enabled (v2) + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: None + When an agent(input_type:pcap, settings: {"iface":"mocked"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a dns policy pcap with tap_selector matching all tag(s) of the tap from an agent, top_qnames metric_groups enabled, top_ecs, cardinality, counters, top_ports, top_size, xact_times, quantiles, top_qtypes, top_rcodes metric_groups disabled and settings: {"require_version":"2.0"} is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And referred sink must have active state on response within 120 seconds + Then metrics must be correctly generated for dns-v2 handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_dns_v2 @root @mocked_interface +Scenario: dns handler with only quantiles metric groups enabled (v2) + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: None + When an agent(input_type:pcap, settings: {"iface":"mocked"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a dns policy pcap with tap_selector matching all tag(s) of the tap from an agent, quantiles metric_groups enabled, top_ecs, cardinality, counters, top_qnames, top_ports, top_size, xact_times, top_qtypes, top_rcodes metric_groups disabled and settings: {"require_version":"2.0"} is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And referred sink must have active state on response within 120 seconds + Then metrics must be correctly generated for dns-v2 handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_dns_v2 @root @mocked_interface +Scenario: dns handler with only top_qtypes metric groups enabled (v2) + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: None + When an agent(input_type:pcap, settings: {"iface":"mocked"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a dns policy pcap with tap_selector matching all tag(s) of the tap from an agent, top_qtypes metric_groups enabled, top_ecs, cardinality, counters, top_qnames, top_ports, top_size, xact_times, quantiles, top_rcodes metric_groups disabled and settings: {"require_version":"2.0"} is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And referred sink must have active state on response within 120 seconds + Then metrics must be correctly generated for dns-v2 handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_dns_v2 @root @mocked_interface +Scenario: dns handler with only top_rcodes metric groups enabled (v2) + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: None + When an agent(input_type:pcap, settings: {"iface":"mocked"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a dns policy pcap with tap_selector matching all tag(s) of the tap from an agent, top_rcodes metric_groups enabled, top_ecs, cardinality, counters, top_qnames, top_ports, top_size, xact_times, quantiles, top_qtypes metric_groups disabled and settings: {"require_version":"2.0"} is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And referred sink must have active state on response within 120 seconds + Then metrics must be correctly generated for dns-v2 handler + And remove the agent .yaml generated on each scenario + + +#### net v2.0 + +@sanity @metric_groups @metrics_net_v2 @root @mocked_interface +Scenario: net handler with default metric groups configuration (v2) + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: None + When an agent(input_type:pcap, settings: {"iface":"mocked"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. 
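Each dns-v2 scenario above drives the same policy builder: one metric group explicitly enabled, the rest disabled, and settings {"require_version":"2.0"}. Below is a minimal sketch of the handler-module dict such a step builds; only "type" and "require_version" are taken from the HandlerModules step code later in this patch, while the "metric_groups"/"enable"/"disable" key names and the module name are illustrative assumptions.

    # Sketch only: "type" and "require_version" mirror __build_module below in this
    # patch; the metric_groups enable/disable shape is assumed for illustration.
    def build_dns_v2_module(name, enabled, disabled):
        return {
            name: {
                "type": "dns",
                "require_version": "2.0",
                "filter": {},
                "config": {},
                "metric_groups": {
                    "enable": sorted(enabled),
                    "disable": sorted(disabled),
                },
            }
        }

    # e.g. the "only top_rcodes metric groups enabled" scenario:
    module = build_dns_v2_module(
        "default_dns_v2",  # hypothetical module name
        enabled={"top_rcodes"},
        disabled={"top_ecs", "cardinality", "counters", "top_qnames", "top_ports",
                  "top_size", "xact_times", "quantiles", "top_qtypes"},
    )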
[Overwrite default: False. Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a net policy pcap with tap_selector matching all tag(s) of the tap from an agent, default metric_groups enabled, default metric_groups disabled and settings: {"require_version":"2.0"} is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, ipfix.pcap, ecmp.pcap, ipfix.pcap, nf9.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And referred sink must have active state on response within 120 seconds + Then metrics must be correctly generated for net-v2 handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_net_v2 @root @mocked_interface +Scenario: net handler with all metric groups enabled (v2) + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: None + When an agent(input_type:pcap, settings: {"iface":"mocked"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a net policy pcap with tap_selector matching all tag(s) of the tap from an agent, all metric_groups enabled, none metric_groups disabled and settings: {"require_version":"2.0"} is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, ipfix.pcap, ecmp.pcap, ipfix.pcap, nf9.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And referred sink must have active state on response within 120 seconds + Then metrics must be correctly generated for net-v2 handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_net_v2 @root @mocked_interface +Scenario: net handler with all metric groups disabled (v2) + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: None + When an agent(input_type:pcap, settings: {"iface":"mocked"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a net policy pcap with tap_selector matching all tag(s) of the tap from an agent, none metric_groups enabled, all metric_groups disabled and settings: {"require_version":"2.0"} is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, ipfix.pcap, ecmp.pcap, ipfix.pcap, nf9.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And referred sink must have active state on response within 120 seconds + Then metrics must be correctly generated for net-v2 handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_net_v2 @root @mocked_interface +Scenario: net handler with only cardinality metric groups enabled (v2) + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: None + When an agent(input_type:pcap, settings: {"iface":"mocked"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a net policy pcap with tap_selector matching all tag(s) of the tap from an agent, cardinality metric_groups enabled, quantiles, counters, top_geo, top_ips metric_groups disabled and settings: {"require_version":"2.0"} is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, ipfix.pcap, ecmp.pcap, ipfix.pcap, nf9.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And referred sink must have active state on response within 120 seconds + Then metrics must be correctly generated for net-v2 handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_net_v2 @root @mocked_interface +Scenario: net handler with only counters metric groups enabled (v2) + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: None + When an agent(input_type:pcap, settings: {"iface":"mocked"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a net policy pcap with tap_selector matching all tag(s) of the tap from an agent, counters metric_groups enabled, quantiles, cardinality, top_geo, top_ips metric_groups disabled and settings: {"require_version":"2.0"} is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, ipfix.pcap, ecmp.pcap, ipfix.pcap, nf9.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And referred sink must have active state on response within 120 seconds + Then metrics must be correctly generated for net-v2 handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_net_v2 @root @mocked_interface +Scenario: net handler with only top_geo metric groups enabled (v2) + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: None + When an agent(input_type:pcap, settings: {"iface":"mocked"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a net policy pcap with tap_selector matching all tag(s) of the tap from an agent, top_geo metric_groups enabled, quantiles, counters, cardinality, top_ips metric_groups disabled and settings: {"require_version":"2.0"} is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, ipfix.pcap, ecmp.pcap, ipfix.pcap, nf9.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And referred sink must have active state on response within 120 seconds + Then metrics must be correctly generated for net-v2 handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_net_v2 @root @mocked_interface +Scenario: net handler with only top_ips metric groups enabled (v2) + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: None + When an agent(input_type:pcap, settings: {"iface":"mocked"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a net policy pcap with tap_selector matching all tag(s) of the tap from an agent, top_ips metric_groups enabled, quantiles, counters, top_geo, cardinality metric_groups disabled and settings: {"require_version":"2.0"} is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, ipfix.pcap, ecmp.pcap, ipfix.pcap, nf9.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And referred sink must have active state on response within 120 seconds + Then metrics must be correctly generated for net-v2 handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_net_v2 @root @mocked_interface +Scenario: net handler with only quantiles metric groups enabled (v2) + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: None + When an agent(input_type:pcap, settings: {"iface":"mocked"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a net policy pcap with tap_selector matching all tag(s) of the tap from an agent, quantiles metric_groups enabled, top_ips, counters, top_geo, cardinality metric_groups disabled and settings: {"require_version":"2.0"} is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, ipfix.pcap, ecmp.pcap, ipfix.pcap, nf9.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And referred sink must have active state on response within 120 seconds + Then metrics must be correctly generated for net-v2 handler And remove the agent .yaml generated on each scenario \ No newline at end of file diff --git a/python-test/features/migration.feature b/python-test/features/migration.feature new file mode 100644 index 000000000..1b414dec6 --- /dev/null +++ b/python-test/features/migration.feature @@ -0,0 +1,93 @@ +@migration @AUTORETRY +Feature: Migration tests + +@pre-migration +Scenario: Agent legacy + sink legacy -> sink OTEL + Given the Orb user has a registered account + And the Orb user logs in + And that an agent with 1 orb tag(s) and OTEL disabled already exists and is online within 30 seconds + And 
pktvisor state is running + And referred agent is subscribed to 1 group + And this agent's heartbeat shows that 1 groups are matching the agent + And that a/an legacy sink already exists (migration) + And 2 simple policies are applied to the group + And this agent's heartbeat shows that 2 policies are applied and all has status running + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And referred sink must have active state on response within 120 seconds + And 2 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + When the sink is updated and OTEL is enabled + And referred sink must have active state on response after 10 seconds + + +@pre-migration +Scenario: Agent legacy + sink OTEL + Given the Orb user has a registered account + And the Orb user logs in + And that an agent with 1 orb tag(s) and OTEL disabled already exists and is online within 30 seconds + And pktvisor state is running + And referred agent is subscribed to 1 group + And this agent's heartbeat shows that 1 groups are matching the agent + And that a/an OTEL sink already exists (migration) + And 2 simple policies are applied to the group + And this agent's heartbeat shows that 2 policies are applied and all has status running + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And referred sink must have active state on response within 120 seconds + And 2 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + + +@pre-migration +Scenario: Adding policies to an Agent legacy after migrate sink legacy to sink OTEL + Given the Orb user has a registered account + And the Orb user logs in + And that an agent with 1 orb tag(s) and OTEL disabled already exists and is online within 30 seconds + And pktvisor state is running + And that a/an legacy sink already exists (migration) + And the sink is updated and OTEL is enabled + And referred agent is subscribed to 1 group + And this agent's heartbeat shows that 1 groups are matching the agent + When 2 simple policies are applied to the group + Then this agent's heartbeat shows that 2 policies are applied and all has status running + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And referred sink must have active state on response within 120 seconds + And 2 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And referred sink must have active state on response within 120 seconds + + +@pre-migration +Scenario: Adding policies to an Agent legacy after migrate sink legacy to sink OTEL and agent legacy to otel + Given the Orb user has a registered account + And the Orb user logs in + And that an agent with 1 orb tag(s) and OTEL disabled already exists and is online within 30 seconds + And pktvisor state is running + And that a/an legacy sink already exists (migration) + And referred agent is 
subscribed to 1 group + And this agent's heartbeat shows that 1 groups are matching the agent + And 2 simple policies are applied to the group + And this agent's heartbeat shows that 2 policies are applied and all has status running + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And referred sink must have active state on response within 120 seconds + And 2 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And referred sink must have active state on response within 120 seconds + When the sink is updated and OTEL is enabled + And stop the orb-agent container + And the agent container is started on an available port and use otel:enabled env vars + And the agent status is online + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 2 policies are applied and all has status running + And 2 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And referred sink must have active state on response after 120 seconds + And 2 simple policies are applied to the group + Then this agent's heartbeat shows that 4 policies are applied and all has status running + And 4 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And referred sink must have active state on response within 120 seconds + + +@pos-migration +Scenario: Check if all sinks are OTEL after migration + Given the Orb user has a registered account + And the Orb user logs in + Then all existing sinks must have OTEL enabled \ No newline at end of file diff --git a/python-test/features/policies.feature b/python-test/features/policies.feature index ef82e63a5..94e628ab1 100644 --- a/python-test/features/policies.feature +++ b/python-test/features/policies.feature @@ -5,7 +5,6 @@ Feature: policy creation Scenario: Create a policy with dns handler, description, host specification, bpf filter, pcap source, only qname suffix and only rcode Given the Orb user has a registered account And the Orb user logs in - And that an agent with 1 orb tag(s) already exists and is online When a new policy is created using: handler=dns, description='policy_dns', host_specification=10.0.1.0/24,10.0.2.1/32,2001:db8::/64, bpf_filter_expression=udp port 53, pcap_source=libpcap, only_qname_suffix=[.foo.com/ .example.com], only_rcode=0 Then referred policy must be listed on the orb policies list @@ -14,7 +13,6 @@ Feature: policy creation Scenario: Create a policy with dns handler, host specification, bpf filter, pcap source, only qname suffix and only rcode Given the Orb user has a registered account And the Orb user logs in - And that an agent with 1 orb tag(s) already exists and is online When a new policy is created using: handler=dns, host_specification=10.0.1.0/24,10.0.2.1/32,2001:db8::/64, bpf_filter_expression=udp port 53, pcap_source=libpcap, only_qname_suffix=[.foo.com/ .example.com], only_rcode=2 Then referred policy must be listed on the orb policies list @@ -23,7 +21,6 @@ Feature: policy creation Scenario: Create a policy with dns handler, bpf filter, pcap source, only qname suffix and only rcode Given the Orb user has a registered account And the Orb user logs in - And that an agent with 1 orb tag(s) already exists and is online When a new policy is created using: handler=dns, 
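The pre-migration scenarios hinge on two toggles: flipping an existing sink's opentelemetry flag, then restarting the agent with the OTEL env var. A rough sketch of both follows; get_sink and edit_sink are the helpers used by the sink step implementation later in this patch and are passed in here so the snippet stays self-contained.

    # Rough sketch of the legacy -> OTEL sink toggle used by "the sink is updated
    # and OTEL is enabled"; helper functions are injected rather than imported.
    def migrate_sink_to_otel(get_sink, edit_sink, token, sink_id, prometheus_key):
        sink = get_sink(token, sink_id)
        sink["config"]["password"] = prometheus_key   # the edit re-sends the password
        sink["config"]["opentelemetry"] = "enabled"   # legacy -> OTEL
        return edit_sink(token, sink_id, sink)

    # env var group used when the agent container is restarted with otel:enabled
    OTEL_AGENT_ENV = {"ORB_OTEL_ENABLE": "true"}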
bpf_filter_expression=udp port 53, pcap_source=af_packet, only_qname_suffix=[.foo.com/ .example.com], only_rcode=3 Then referred policy must be listed on the orb policies list @@ -32,7 +29,6 @@ Feature: policy creation Scenario: Create a policy with dns handler, pcap source, only qname suffix and only rcode Given the Orb user has a registered account And the Orb user logs in - And that an agent with 1 orb tag(s) already exists and is online When a new policy is created using: handler=dns, pcap_source=af_packet, only_qname_suffix=[.foo.com/ .example.com], only_rcode=5 Then referred policy must be listed on the orb policies list @@ -41,7 +37,6 @@ Feature: policy creation Scenario: Create a policy with dns handler, only qname suffix Given the Orb user has a registered account And the Orb user logs in - And that an agent with 1 orb tag(s) already exists and is online When a new policy is created using: handler=dns, only_qname_suffix=[.foo.com/ .example.com] Then referred policy must be listed on the orb policies list @@ -50,7 +45,6 @@ Feature: policy creation Scenario: Create a policy with dhcp handler, description, host specification, bpf filter and pcap source Given the Orb user has a registered account And the Orb user logs in - And that an agent with 1 orb tag(s) already exists and is online When a new policy is created using: handler=dhcp, description='policy_dhcp', host_specification=10.0.1.0/24,10.0.2.1/32,2001:db8::/64, bpf_filter_expression=udp port 53, pcap_source=libpcap Then referred policy must be listed on the orb policies list @@ -59,7 +53,6 @@ Feature: policy creation Scenario: Create a policy with net handler, description, host specification, bpf filter and pcap source Given the Orb user has a registered account And the Orb user logs in - And that an agent with 1 orb tag(s) already exists and is online When a new policy is created using: handler=net, description='policy_net', host_specification=10.0.1.0/24,10.0.2.1/32,2001:db8::/64, bpf_filter_expression=udp port 53, pcap_source=libpcap Then referred policy must be listed on the orb policies list @@ -68,7 +61,6 @@ Feature: policy creation Scenario: Create duplicated net policy without insert new name Given the Orb user has a registered account And the Orb user logs in - And that an agent with 1 orb tag(s) already exists and is online And a new policy is created using: handler=net, description='policy_net' When try to duplicate this policy 4 times without set new name Then 3 policies must be successfully duplicated and 1 must return an error @@ -78,7 +70,6 @@ Scenario: Create duplicated net policy without insert new name Scenario: Create duplicated dhcp policy without insert new name Given the Orb user has a registered account And the Orb user logs in - And that an agent with 1 orb tag(s) already exists and is online And a new policy is created using: handler=dhcp, description='policy_dhcp' When try to duplicate this policy 4 times without set new name Then 3 policies must be successfully duplicated and 1 must return an error @@ -88,7 +79,6 @@ Scenario: Create duplicated dhcp policy without insert new name Scenario: Create duplicated dns policy without insert new name Given the Orb user has a registered account And the Orb user logs in - And that an agent with 1 orb tag(s) already exists and is online And a new policy is created using: handler=dns, description='policy_dns', host_specification=10.0.1.0/24,10.0.2.1/32,2001:db8::/64, bpf_filter_expression=udp port 53, pcap_source=libpcap, only_qname_suffix=[.foo.com/ 
.example.com], only_rcode=0 When try to duplicate this policy 4 times without set new name Then 3 policies must be successfully duplicated and 1 must return an error @@ -98,7 +88,6 @@ Scenario: Create duplicated dns policy without insert new name Scenario: Create 4 duplicated policy with new name Given the Orb user has a registered account And the Orb user logs in - And that an agent with 1 orb tag(s) already exists and is online And a new policy is created using: handler=dns, description='policy_dns', host_specification=10.0.1.0/24,10.0.2.1/32,2001:db8::/64, bpf_filter_expression=udp port 53, pcap_source=libpcap, only_qname_suffix=[.foo.com/ .example.com], only_rcode=0 When try to duplicate this policy 4 times with a random new name Then 4 policies must be successfully duplicated and 0 must return an error @@ -108,7 +97,6 @@ Scenario: Create 4 duplicated policy with new name Scenario: Create 3 duplicated dns policy without insert new name and 1 with new name Given the Orb user has a registered account And the Orb user logs in - And that an agent with 1 orb tag(s) already exists and is online And a new policy is created using: handler=dns, description='policy_dns', host_specification=10.0.1.0/24,10.0.2.1/32,2001:db8::/64, bpf_filter_expression=udp port 53, pcap_source=libpcap, only_qname_suffix=[.foo.com/ .example.com], only_rcode=0 When try to duplicate this policy 3 times without set new name And 3 policies must be successfully duplicated and 0 must return an error diff --git a/python-test/features/steps/control_plane_agent_groups.py b/python-test/features/steps/control_plane_agent_groups.py index 4c7a8522b..7d2eb70db 100644 --- a/python-test/features/steps/control_plane_agent_groups.py +++ b/python-test/features/steps/control_plane_agent_groups.py @@ -266,7 +266,7 @@ def clean_agent_groups(context): delete_agent_groups(token, agent_groups_filtered_list) -@given("referred agent is subscribed to {amount_of_groups} {group}") +@step("referred agent is subscribed to {amount_of_groups} {group}") def subscribe_agent_to_a_group(context, amount_of_groups, group): assert_that(group, any_of(equal_to("group"), equal_to("groups")), "Unexpected word on step description") agent = context.agent diff --git a/python-test/features/steps/control_plane_agents.py b/python-test/features/steps/control_plane_agents.py index 32a44661f..54e0abd5c 100644 --- a/python-test/features/steps/control_plane_agents.py +++ b/python-test/features/steps/control_plane_agents.py @@ -46,6 +46,33 @@ def check_if_agents_exist(context, orb_tags, status): f"Agent logs: {get_orb_agent_logs(context.container_id)}." 
f"\nLogs: {logs}") +# this step is only necessary for OTEL migration tests, so we can exclude it after the migration +@given("that an agent with {orb_tags} orb tag(s) and OTEL {otel_type} already exists and is {status} within {timeout} seconds") +def check_if_agents_exist(context, orb_tags, otel_type, status, timeout): + timeout = int(timeout) + assert_that(otel_type, any_of("enabled", "disabled"), "Invalid otel type") + otel_map = {"enabled":"true", "disabled":"false"} + context.agent_name = generate_random_string_with_predefined_prefix(agent_name_prefix) + context.orb_tags = create_tags_set(orb_tags) + context.agent = create_agent(context.token, context.agent_name, context.orb_tags) + context.agent_key = context.agent["key"] + token = context.token + run_local_agent_container(context, "available", include_otel_env_var="true", enable_otel=otel_map[otel_type]) + agent_id = context.agent['id'] + existing_agents = get_agent(token, agent_id) + assert_that(len(existing_agents), greater_than(0), "Agent not created") + logs = get_orb_agent_logs(context.container_id) + agent_status, context.agent = wait_until_expected_agent_status(token, agent_id, status, timeout=timeout) + assert_that(agent_status, is_(equal_to(status)), + f"Agent did not get '{status}' after {str(timeout)} seconds, but was '{agent_status}'. \n" + f"Agent: {json.dumps(context.agent, indent=4)}. \n Logs: {logs}") + local_orb_path = configs.get("local_orb_path") + agent_schema_path = local_orb_path + "/python-test/features/steps/schemas/agent_schema.json" + is_schema_valid = validate_json(context.agent, agent_schema_path) + assert_that(is_schema_valid, equal_to(True), f"Invalid agent json. \n Agent = {context.agent}." + f"Agent logs: {get_orb_agent_logs(context.container_id)}." + f"\nLogs: {logs}") + @step('a new agent is created with {orb_tags} orb tag(s)') def agent_is_created(context, orb_tags): diff --git a/python-test/features/steps/control_plane_policies.py b/python-test/features/steps/control_plane_policies.py index f2945ef6a..238f4c592 100644 --- a/python-test/features/steps/control_plane_policies.py +++ b/python-test/features/steps/control_plane_policies.py @@ -468,8 +468,7 @@ def check_duplicated_policies_status(context, amount_successfully_policies, amou wrongly_duplicated += 1 assert_that(len(successfully_duplicated), equal_to(int(amount_successfully_policies)), f"Amount of policies successfully duplicated fails." 
- f"Policies duplicated: {successfully_duplicated}" - f"\n Agent: {json.dumps(context.agent, indent=4)}") + f"Policies duplicated: {successfully_duplicated}") assert_that(wrongly_duplicated, equal_to(int(amount_error_policies)), f"Amount of policies wrongly duplicated fails" f".") @@ -1176,7 +1175,7 @@ class HandlerModules(HandlerConfigs): def __init__(self): self.handler_modules = dict() - def __build_module(self, name, module_type, configs_list, filters_list): + def __build_module(self, name, module_type, configs_list, filters_list, require_version=None): module = { name: { "type": module_type, @@ -1189,6 +1188,8 @@ def __build_module(self, name, module_type, configs_list, filters_list): } } } + if require_version is not None: + module[name]["require_version"] = require_version module = UtilsManager.update_object_with_filters_and_configs(self, module, name, configs_list, filters_list) @@ -1219,6 +1220,7 @@ def add_dns_module(self, name, settings=None): self.geoloc_notfound = {'geoloc_notfound': settings_json.get("geoloc_notfound", None)} self.asn_notfound = {'asn_notfound': settings_json.get("asn_notfound", None)} self.dnstap_msg_type = {'dnstap_msg_type': settings_json.get("dnstap_msg_type", None)} + self.require_version = settings_json.get("require_version", None) dns_configs = [self.public_suffix_list] @@ -1226,7 +1228,7 @@ def add_dns_module(self, name, settings=None): self.only_qtype, self.only_qname_suffix, self.geoloc_notfound, self.asn_notfound, self.dnstap_msg_type] - self.__build_module(self.name, "dns", dns_configs, dns_filters) + self.__build_module(self.name, "dns", dns_configs, dns_filters, self.require_version) return self.handler_modules def add_net_module(self, name, settings=None): @@ -1238,12 +1240,13 @@ def add_net_module(self, name, settings=None): self.asn_notfound = {'asn_notfound': settings_json.get('asn_notfound', None)} self.only_geoloc_prefix = {'only_geoloc_prefix': settings_json.get('only_geoloc_prefix', None)} self.only_asn_number = {'only_asn_number': settings_json.get('only_asn_number', None)} + self.require_version = settings_json.get("require_version", None) net_configs = [] net_filters = [self.geoloc_notfound, self.asn_notfound, self.only_geoloc_prefix, self.only_asn_number] - self.__build_module(self.name, "net", net_configs, net_filters) + self.__build_module(self.name, "net", net_configs, net_filters, self.require_version) return self.handler_modules def add_dhcp_module(self, name): diff --git a/python-test/features/steps/control_plane_sink.py b/python-test/features/steps/control_plane_sink.py index e3eac5d01..d4d70a5b0 100644 --- a/python-test/features/steps/control_plane_sink.py +++ b/python-test/features/steps/control_plane_sink.py @@ -3,6 +3,7 @@ from utils import random_string, filter_list_by_parameter_start_with, threading_wait_until, validate_json from hamcrest import * import requests +import threading configs = TestConfig.configs() sink_name_prefix = "test_sink_label_name_" @@ -28,14 +29,20 @@ def check_prometheus_grafana_credentials(context): @step("a new sink is created") -def create_sink(context): +def create_sink(context, **kwargs): sink_label_name = sink_name_prefix + random_string(10) token = context.token endpoint = context.remote_prometheus_endpoint username = context.prometheus_username password = context.prometheus_key - include_otel_env_var = configs.get("include_otel_env_var") - enable_otel = configs.get("enable_otel") + if "enable_otel" in kwargs: # this if/else logic can be removed after otel migration + kwargs["enable_otel"] = 
kwargs["enable_otel"].lower() + assert_that(kwargs["enable_otel"], any_of("true", "false")) + include_otel_env_var = "true" + enable_otel = kwargs["enable_otel"] + else: + include_otel_env_var = configs.get("include_otel_env_var") + enable_otel = configs.get("enable_otel") otel_map = {"true": "enabled", "false": "disabled"} if include_otel_env_var == "true": context.sink = create_new_sink(token, sink_label_name, endpoint, username, password, @@ -87,6 +94,15 @@ def new_sink(context): create_sink(context) +# this step is only necessary for OTEL migration tests, so we can exclude it after the migration +@step("that a/an {sink_type} sink already exists (migration)") +def new_sink(context, sink_type): + assert_that(sink_type, any_of("OTEL", "legacy"), "Unexpected type of sink") + otel_map = {"OTEL": "true", "legacy": "false"} + check_prometheus_grafana_credentials(context) + create_sink(context, enable_otel=otel_map[sink_type]) + + @step("that {amount_of_sinks} sinks already exists") def new_multiple_sinks(context, amount_of_sinks): check_prometheus_grafana_credentials(context) @@ -142,6 +158,47 @@ def check_sink_status(context, status, time_to_wait): assert_that(get_sink_response['state'], equal_to(status), f"Sink {context.sink} state failed") +@step("referred sink must have {status} state on response after {time_to_wait} seconds") +def check_sink_status(context, status, time_to_wait): + sink_id = context.sink["id"] + assert_that(time_to_wait.isdigit(), is_(True), f"Invalid type: 'time_to_wait' must be an int and is {time_to_wait}") + time_to_wait = int(time_to_wait) + threading.Event().wait(time_to_wait) + get_sink_response = get_sink_status_and_check(context.token, sink_id, status) + + assert_that(get_sink_response['state'], equal_to(status), f"Sink {context.sink} state failed") + + +# this step is only necessary for OTEL migration tests, so we can exclude it after the migration +@step("the sink is updated and OTEL is {otel}") +def edit_sink_field(context, otel): + assert_that(otel, any_of("enabled", "disabled")) + sink = get_sink(context.token, context.sink['id']) + sink['config']['password'] = configs.get('prometheus_key') + sink['config']["opentelemetry"] = otel + context.sink = edit_sink(context.token, context.sink['id'], sink) + + +# this step is only necessary for OTEL migration tests, so we can exclude it after the migration +@step("all existing sinks must have OTEL enabled") +def check_all_sinks_status(context): + all_sinks = list_sinks(context.token) + otel_sinks = list() + legacy_sinks = list() + migrated_sinks = list() + for sink in all_sinks: + if "opentelemetry" in sink["config"].keys() and sink["config"]["opentelemetry"] == "enabled": + otel_sinks.append(sink["id"]) + else: + legacy_sinks.append(sink["id"]) + + if "migrated" in sink["config"].keys() and sink["config"]["migrated"] == "m3": + migrated_sinks.append(sink["id"]) + print(f"{len(migrated_sinks)} sinks were migrated") + assert_that(len(otel_sinks), equal_to(len(all_sinks)), f"{len(legacy_sinks)} sinks are not with otel tag enabled: " + f"{legacy_sinks}") + + @step("the sink {field_to_edit} is edited and an {type_of_field} one is used") def edit_sink_field(context, field_to_edit, type_of_field): assert_that(field_to_edit, any_of("remote host", "username", "password", "name", "description", "tags"), diff --git a/python-test/features/steps/local_agent.py b/python-test/features/steps/local_agent.py index 0c389d4b4..038e3aacf 100644 --- a/python-test/features/steps/local_agent.py +++ 
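The post-migration check added here boils down to partitioning sinks by their opentelemetry config value and asserting that the legacy bucket is empty. A compact, self-contained sketch, assuming sink dicts carry the same "config" shape as in the step code:

    # Sketch of the partition behind "all existing sinks must have OTEL enabled".
    def partition_sinks_by_otel(all_sinks):
        otel_ids, legacy_ids = [], []
        for sink in all_sinks:
            if sink.get("config", {}).get("opentelemetry") == "enabled":
                otel_ids.append(sink["id"])
            else:
                legacy_ids.append(sink["id"])
        return otel_ids, legacy_ids

    # the post-migration assertion then reduces to: no legacy ids left
    otel_ids, legacy_ids = partition_sinks_by_otel([])
    assert not legacy_ids, f"sinks without otel enabled: {legacy_ids}"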
b/python-test/features/steps/local_agent.py @@ -29,9 +29,13 @@ def check_metrics_by_handler(context, handler_type): expected_metrics_not_present = expected_metrics.difference(metrics_present) if expected_metrics_not_present == set(): expected_metrics_not_present = None + else: + expected_metrics_not_present = sorted(expected_metrics_not_present) extra_metrics_present = metrics_present.difference(expected_metrics) if extra_metrics_present == set(): extra_metrics_present = None + else: + extra_metrics_present = sorted(extra_metrics_present) assert_that(correct_metrics, equal_to(True), f"Metrics are not the expected. " f"Metrics expected that are not present: {expected_metrics_not_present}." f"Extra metrics present: {extra_metrics_present}") @@ -41,8 +45,17 @@ def check_metrics_by_handler(context, handler_type): def run_local_agent_container(context, status_port, **kwargs): use_orb_live_address_pattern = configs.get("use_orb_live_address_pattern") verify_ssl = configs.get('verify_ssl') + if "include_otel_env_var" in kwargs: # this if/else logic can be removed after otel migration (only else is needed) + include_otel_env_var = kwargs["include_otel_env_var"] + else: + include_otel_env_var = configs.get("include_otel_env_var") + if "enable_otel" in kwargs: # this if/else logic can be removed after otel migration (only else is needed) + enable_otel = kwargs["enable_otel"] + else: + enable_otel = configs.get("enable_otel") + env_vars = create_agent_env_vars_set(context.agent['id'], context.agent['channel_id'], context.agent_key, - verify_ssl, use_orb_live_address_pattern) + verify_ssl, use_orb_live_address_pattern, include_otel_env_var, enable_otel) env_vars.update(kwargs) assert_that(status_port, any_of(equal_to("available"), equal_to("unavailable")), "Unexpected value for port") availability = {"available": True, "unavailable": False} @@ -55,7 +68,7 @@ def run_local_agent_container(context, status_port, **kwargs): if context.port != 10583: env_vars["ORB_BACKENDS_PKTVISOR_API_PORT"] = str(context.port) - context.container_id = run_agent_container(agent_image, env_vars, LOCAL_AGENT_CONTAINER_NAME + + context.container_id = run_agent_container(agent_image, env_vars, LOCAL_AGENT_CONTAINER_NAME + random_string(2) + context.agent['name'][-5:]) if context.container_id not in context.containers_id.keys(): context.containers_id[context.container_id] = str(context.port) @@ -69,24 +82,26 @@ def run_local_agent_container(context, status_port, **kwargs): f"\n Logs:{logs}") -@step('the agent container is started on an {status_port} port and use {agent_tap} env vars') -def run_local_agents_with_extra_env_vars(context, status_port, agent_tap): - agent_tap = agent_tap.upper() - assert_that(agent_tap, any_of("PCAP", "NETFLOW", "SFLOW", "DNSTAP", "ALL")) +@step('the agent container is started on an {status_port} port and use {group} env vars') +def run_local_agents_with_extra_env_vars(context, status_port, group): + group = group.upper() + assert_that(group, any_of("PCAP", "NETFLOW", "SFLOW", "DNSTAP", "ALL", "OTEL:ENABLED")) vars_by_input = { "PCAP": {"PKTVISOR_PCAP_IFACE_DEFAULT": configs.get("orb_agent_interface", "auto")}, "NETFLOW": {"PKTVISOR_NETFLOW": "true", "PKTVISOR_NETFLOW_PORT_DEFAULT": 9995}, "SFLOW": {"PKTVISOR_SFLOW": "true", "PKTVISOR_SFLOW_PORT_DEFAULT": 9994}, - "DNSTAP": {"PKTVISOR_DNSTAP": "true", "PKTVISOR_DNSTAP_PORT_DEFAULT": 9990} + "DNSTAP": {"PKTVISOR_DNSTAP": "true", "PKTVISOR_DNSTAP_PORT_DEFAULT": 9990}, + # this line below is only necessary for OTEL migration tests, so we can exclude 
it after the migration + "OTEL:ENABLED": {"ORB_OTEL_ENABLE": "true"} } - if agent_tap == "ALL": + if group == "ALL": vars_by_input["ALL"] = dict() vars_by_input["ALL"].update(vars_by_input["PCAP"]) vars_by_input["ALL"].update(vars_by_input["NETFLOW"]) vars_by_input["ALL"].update(vars_by_input["SFLOW"]) vars_by_input["ALL"].update(vars_by_input["DNSTAP"]) - run_local_agent_container(context, status_port, **vars_by_input[agent_tap]) + run_local_agent_container(context, status_port, **vars_by_input[group]) @step('the container logs that were output after {condition} contain the message "{text_to_match}" within' @@ -202,7 +217,7 @@ def remove_all_orb_agent_test_containers(context): def create_agent_env_vars_set(agent_id, agent_channel_id, agent_mqtt_key, verify_ssl, - use_orb_live_address_pattern): + use_orb_live_address_pattern, include_otel_env_var, enable_otel): """ Create the set of environmental variables to be passed to the agent :param agent_id: id of the agent @@ -211,11 +226,11 @@ def create_agent_env_vars_set(agent_id, agent_channel_id, agent_mqtt_key, verify :param verify_ssl: ignore process to verify tls if false :param use_orb_live_address_pattern: if true, uses the shortcut orb_cloud_address. if false sets api and mqtt address. + :param include_otel_env_var: If true, use the environmental variable "ORB_OTEL_ENABLE" on agent provisioning command + :param enable_otel: Value to be used in variable "ORB_OTEL_ENABLE" :return: set of environmental variables """ orb_address = configs.get('orb_address') - include_otel_env_var = configs.get("include_otel_env_var") - enable_otel = configs.get("enable_otel") env_vars = {"ORB_CLOUD_MQTT_ID": agent_id, "ORB_CLOUD_MQTT_CHANNEL_ID": agent_channel_id, "ORB_CLOUD_MQTT_KEY": agent_mqtt_key} diff --git a/python-test/features/steps/metrics.py b/python-test/features/steps/metrics.py index 79f58601f..f13879b6b 100644 --- a/python-test/features/steps/metrics.py +++ b/python-test/features/steps/metrics.py @@ -33,10 +33,13 @@ def default_enabled_metric_groups_by_handler(handler): """ assert_that(isinstance(handler, str), equal_to(True), f"Invalid handler type {handler}. 
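With this change the OTEL flag can come from step kwargs instead of always being read from the test config file, which is what lets the migration scenarios run legacy and OTEL agents side by side. A sketch of how the flag is expected to land in the agent env vars; the `include_otel_env_var == "true"` guard mirrors the sink-side handling and is an assumption about the unchanged body of create_agent_env_vars_set.

    # Sketch of the OTEL-related part of the env-var assembly (parameter names
    # follow the new create_agent_env_vars_set signature; the guard is assumed).
    def build_agent_env_vars(agent_id, channel_id, mqtt_key,
                             include_otel_env_var, enable_otel):
        env_vars = {
            "ORB_CLOUD_MQTT_ID": agent_id,
            "ORB_CLOUD_MQTT_CHANNEL_ID": channel_id,
            "ORB_CLOUD_MQTT_KEY": mqtt_key,
        }
        if include_otel_env_var == "true":
            env_vars["ORB_OTEL_ENABLE"] = enable_otel   # "true" or "false"
        return env_vars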
Handler must be str type") handler = handler.lower() - assert_that(handler, any_of("dns", "net", "dhcp", "bgp", "pcap", "flow", "netprobe"), "Invalid handler") + assert_that(handler, any_of("dns", "dns-v2", "net", "net-v2", "dhcp", "bgp", "pcap", "flow", "netprobe"), + "Invalid handler") groups_default_enabled = { "dns": ["cardinality", "counters", "dns_transaction", "top_qnames", "top_ports"], + "dns-v2": ["cardinality", "counters", "top_qnames", "quantiles", "top_qtypes", "top_rcodes"], "net": ["cardinality", "counters", "top_geo", "top_ips"], + "net-v2": ["cardinality", "counters", "top_geo", "top_ips", "quantiles"], "dhcp": [], "bgp": [], "pcap": [], @@ -148,6 +151,82 @@ def expected_metrics_by_handlers_and_groups(handler, groups_enabled, groups_disa ("all" in groups_enabled and "top_ports" not in groups_disabled): metric_groups.add("dns_top_udp_ports") + elif isinstance(handler, str) and handler.lower() == "dns-v2": + metric_groups = { + "dns_observed_packets", + "dns_deep_sampled_packets", + "dns_rates_events_sum", + "dns_rates_events_count", + "dns_rates_events" + } + if ("cardinality" in groups_enabled and "cardinality" not in groups_disabled) or \ + ("all" in groups_enabled and "cardinality" not in groups_disabled): + metric_groups.add("dns_cardinality_qname") + if ("counters" in groups_enabled and "counters" not in groups_disabled) or \ + ("all" in groups_enabled and "counters" not in groups_disabled): + metric_groups.add("dns_xacts") + metric_groups.add("dns_udp_xacts") + metric_groups.add("dns_timeout_queries") + metric_groups.add("dns_tcp_xacts") + metric_groups.add("dns_srvfail_xacts") + metric_groups.add("dns_refused_xacts") + metric_groups.add("dns_orphan_responses") + metric_groups.add("dns_nxdomain_xacts") + metric_groups.add("dns_noerror_xacts") + metric_groups.add("dns_nodata_xacts") + metric_groups.add("dns_ipv6_xacts") + metric_groups.add("dns_ipv4_xacts") + metric_groups.add("dns_filtered_packets") + metric_groups.add("dns_ecs_xacts") + metric_groups.add("dns_dot_xacts") + metric_groups.add("dns_doq_xacts") + metric_groups.add("dns_doh_xacts") + metric_groups.add("dns_dnscrypt_udp_xacts") + metric_groups.add("dns_dnscrypt_tcp_xacts") + metric_groups.add("dns_checking_disabled_xacts") + metric_groups.add("dns_authoritative_answer_xacts") + metric_groups.add("dns_authenticated_data_xacts") + if ("quantiles" in groups_enabled and "quantiles" not in groups_disabled) or \ + ("all" in groups_enabled and "quantiles" not in groups_disabled): + metric_groups.add("dns_xact_rates_sum") + metric_groups.add("dns_xact_rates_count") + metric_groups.add("dns_xact_rates") + if ("top_ecs" in groups_enabled and "top_ecs" not in groups_disabled) or \ + ("all" in groups_enabled and "top_ecs" not in groups_disabled): + metric_groups.add("dns_top_geo_loc_ecs_xacts") + metric_groups.add("dns_top_ecs_xacts") + metric_groups.add("dns_top_asn_ecs_xacts") + if ("top_ports" in groups_enabled and "top_ports" not in groups_disabled) or \ + ("all" in groups_enabled and "top_ports" not in groups_disabled): + metric_groups.add("dns_top_udp_ports_xacts") + if ("top_qnames" in groups_enabled and "top_qnames" not in groups_disabled) or \ + ("all" in groups_enabled and "top_qnames" not in groups_disabled): + metric_groups.add("dns_top_qname3_xacts") + metric_groups.add("dns_top_qname2_xacts") + if ("top_qtypes" in groups_enabled and "top_qtypes" not in groups_disabled) or \ + ("all" in groups_enabled and "top_qtypes" not in groups_disabled): + metric_groups.add("dns_top_qtype_xacts") + if ("top_rcodes" 
in groups_enabled and "top_rcodes" not in groups_disabled) or \ + ("all" in groups_enabled and "top_rcodes" not in groups_disabled): + metric_groups.add("dns_top_srvfail_xacts") + metric_groups.add("dns_top_refused_xacts") + metric_groups.add("dns_top_rcode_xacts") + metric_groups.add("dns_top_nxdomain_xacts") + metric_groups.add("dns_top_noerror_xacts") + metric_groups.add("dns_top_nodata_xacts") + if ("top_size" in groups_enabled and "top_size" not in groups_disabled) or \ + ("all" in groups_enabled and "top_size" not in groups_disabled): + metric_groups.add("dns_top_response_bytes") + metric_groups.add("dns_response_query_size_ratio_sum") + metric_groups.add("dns_response_query_size_ratio_count") + metric_groups.add("dns_response_query_size_ratio") + if ("xact_times" in groups_enabled and "xact_times" not in groups_disabled) or \ + ("all" in groups_enabled and "xact_times" not in groups_disabled): + metric_groups.add("dns_xact_time_us_sum") + metric_groups.add("dns_xact_time_us_count") + metric_groups.add("dns_xact_time_us") + # todo find a way to test slow metrics + # metric_groups.add("dns_top_slow_xacts") elif isinstance(handler, str) and handler.lower() == "net": metric_groups = { "packets_deep_samples", @@ -206,6 +285,49 @@ def expected_metrics_by_handlers_and_groups(handler, groups_enabled, groups_disa metric_groups.add("packets_top_ipv4") metric_groups.add("packets_top_ipv6") + elif isinstance(handler, str) and handler.lower() == "net-v2": + metric_groups = { + "net_deep_sampled_packets", + "net_observed_packets", + "net_rates_observed_pps", + "net_rates_observed_pps_count", + "net_rates_observed_pps_sum" + } + if ("cardinality" in groups_enabled and "cardinality" not in groups_disabled) or \ + ("all" in groups_enabled and "cardinality" not in groups_disabled): + metric_groups.add("net_cardinality_ips") + if ("counters" in groups_enabled and "counters" not in groups_disabled) or \ + ("all" in groups_enabled and "counters" not in groups_disabled): + metric_groups.add("net_filtered_packets") + metric_groups.add("net_ipv4_packets") + metric_groups.add("net_ipv6_packets") + metric_groups.add("net_other_l4_packets") + metric_groups.add("net_tcp_packets") + metric_groups.add("net_tcp_syn_packets") + metric_groups.add("net_total_packets") + metric_groups.add("net_udp_packets") + + if ("top_geo" in groups_enabled and "top_geo" not in groups_disabled) or \ + ("all" in groups_enabled and "top_geo" not in groups_disabled): + metric_groups.add("net_top_asn_packets") + metric_groups.add("net_top_geo_loc_packets") + + if ("top_ips" in groups_enabled and "top_ips" not in groups_disabled) or \ + ("all" in groups_enabled and "top_ips" not in groups_disabled): + metric_groups.add("net_top_ipv4_packets") + metric_groups.add("net_top_ipv6_packets") + if ("quantiles" in groups_enabled and "quantiles" not in groups_disabled) or \ + ("all" in groups_enabled and "quantiles" not in groups_disabled): + metric_groups.add("net_payload_size_bytes") + metric_groups.add("net_payload_size_bytes_count") + metric_groups.add("net_payload_size_bytes_sum") + metric_groups.add("net_rates_bps") + metric_groups.add("net_rates_bps_count") + metric_groups.add("net_rates_bps_sum") + metric_groups.add("net_rates_pps") + metric_groups.add("net_rates_pps_count") + metric_groups.add("net_rates_pps_sum") + elif isinstance(handler, str) and handler.lower() == "dhcp": metric_groups = { "dhcp_rates_total", @@ -332,16 +454,16 @@ def expected_metrics_by_handlers_and_groups(handler, groups_enabled, groups_disa ("all" in 
groups_enabled and "top_ips_ports" not in groups_disabled): if ("by_bytes" in groups_enabled and "by_bytes" not in groups_disabled) or \ ("all" in groups_enabled and "by_bytes" not in groups_disabled): - metric_groups.add("flow_top_in_dst_ips_and_port_bytes") - metric_groups.add("flow_top_in_src_ips_and_port_bytes") - metric_groups.add("flow_top_out_dst_ips_and_port_bytes") - metric_groups.add("flow_top_out_src_ips_and_port_bytes") + metric_groups.add("flow_top_in_dst_ip_ports_bytes") + metric_groups.add("flow_top_in_src_ip_ports_bytes") + metric_groups.add("flow_top_out_dst_ip_ports_bytes") + metric_groups.add("flow_top_out_src_ip_ports_bytes") if ("by_packets" in groups_enabled and "by_packets" not in groups_disabled) or \ ("all" in groups_enabled and "by_packets" not in groups_disabled): - metric_groups.add("flow_top_in_dst_ips_and_port_packets") - metric_groups.add("flow_top_in_src_ips_and_port_packets") - metric_groups.add("flow_top_out_dst_ips_and_port_packets") - metric_groups.add("flow_top_out_src_ips_and_port_packets") + metric_groups.add("flow_top_in_dst_ip_ports_packets") + metric_groups.add("flow_top_in_src_ip_ports_packets") + metric_groups.add("flow_top_out_dst_ip_ports_packets") + metric_groups.add("flow_top_out_src_ip_ports_packets") if ("top_geo" in groups_enabled and "top_geo" not in groups_disabled) or \ ("all" in groups_enabled and "top_geo" not in groups_disabled): if ("by_bytes" in groups_enabled and "by_bytes" not in groups_disabled) or \ diff --git a/sinker/backend/pktvisor/pktvisor.go b/sinker/backend/pktvisor/pktvisor.go index fc9420a81..ee72dee52 100644 --- a/sinker/backend/pktvisor/pktvisor.go +++ b/sinker/backend/pktvisor/pktvisor.go @@ -40,6 +40,7 @@ type metricAppendix struct { format string tags map[string]string logger *zap.Logger + warning string } func (p pktvisorBackend) ProcessMetrics(agent *pb.AgentInfoRes, agentID string, data fleet.AgentMetricsRPCPayload) ([]prometheus.TimeSeries, error) { @@ -75,6 +76,7 @@ func (p pktvisorBackend) ProcessMetrics(agent *pb.AgentInfoRes, agentID string, deviceIF: "", handlerLabel: "", format: "prom_sinker", + warning: "Deprecated, soon we will substitute for openTelemetry, check https://orb.community/documentation to how enable openTelemetry in your agent", tags: tags, logger: p.logger, } diff --git a/sinker/config/repo.go b/sinker/config/repo.go index d11d9a40e..931fd0c14 100644 --- a/sinker/config/repo.go +++ b/sinker/config/repo.go @@ -4,6 +4,8 @@ package config +import "context" + type ConfigRepo interface { Exists(ownerID string, sinkID string) bool Add(config SinkConfig) error @@ -12,4 +14,7 @@ type ConfigRepo interface { Edit(config SinkConfig) error GetAll(ownerID string) ([]SinkConfig, error) GetAllOwners() ([]string, error) + DeployCollector(ctx context.Context, config SinkConfig) error + AddActivity(ownerID string, sinkID string) error + GetActivity(ownerID string, sinkID string) (int64, error) } diff --git a/sinker/config/types.go b/sinker/config/types.go index 7522a46a2..122a3ced3 100644 --- a/sinker/config/types.go +++ b/sinker/config/types.go @@ -26,6 +26,7 @@ const ( Active Error Idle + Warning ) type PrometheusState int @@ -35,6 +36,7 @@ var promStateMap = [...]string{ "active", "error", "idle", + "warning", } var promStateRevMap = map[string]PrometheusState{ @@ -42,6 +44,7 @@ var promStateRevMap = map[string]PrometheusState{ "active": Active, "error": Error, "idle": Idle, + "warning": Warning, } func (p PrometheusState) String() string { diff --git a/sinker/config_state_check.go 
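Every branch in the new dns-v2 and net-v2 blocks of metrics.py repeats the same enabled/disabled test (the group or "all" is enabled, and the group is not disabled). That predicate is equivalent to the small helper below, shown only as a reading aid:

    # Equivalent form of the per-group check repeated in the dns-v2 / net-v2 blocks.
    def group_enabled(group, groups_enabled, groups_disabled):
        return (group in groups_enabled or "all" in groups_enabled) \
            and group not in groups_disabled

    # e.g. the top_rcodes branch:
    assert group_enabled("top_rcodes", {"all"}, set())
    assert not group_enabled("top_rcodes", {"all"}, {"top_rcodes"})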
b/sinker/config_state_check.go index 1cea14464..bdab01fdd 100644 --- a/sinker/config_state_check.go +++ b/sinker/config_state_check.go @@ -35,17 +35,7 @@ func (svc *SinkerService) checkState(_ time.Time) { // Set idle if the sinker is more than 30 minutes not sending metrics (Remove from Redis) if cfg.LastRemoteWrite.Add(DefaultTimeout).Before(time.Now()) { if cfg.State == config.Active { - if cfg.Opentelemetry == "enabled" { - err := cfg.State.SetFromString("idle") - if err != nil { - svc.logger.Error("error updating otel sink state", zap.Error(err)) - return - } - if err := svc.sinkerCache.Edit(cfg); err != nil { - svc.logger.Error("error updating otel sink config cache to idle", zap.Error(err)) - return - } - } else { + if cfg.Opentelemetry != "enabled" { if err := svc.sinkerCache.Remove(cfg.OwnerID, cfg.SinkID); err != nil { svc.logger.Error("error updating sink config cache", zap.Error(err)) return diff --git a/sinker/message_handler.go b/sinker/message_handler.go index 03a499891..a4fe045db 100644 --- a/sinker/message_handler.go +++ b/sinker/message_handler.go @@ -28,9 +28,10 @@ func (svc SinkerService) remoteWriteToPrometheus(tsList prometheus.TSList, owner svc.logger.Error("unable to retrieve the sink config", zap.Error(err)) return err } + ctx := context.Background() if cfgRepo.Opentelemetry == "enabled" { - svc.logger.Info("ignoring sink state update on OpenTelemetry sinks") - return nil + svc.logger.Info("deprecate warning opentelemetry sink scraping legacy agent", zap.String("sink-ID", cfgRepo.SinkID)) + ctx = context.WithValue(ctx, "deprecation", "opentelemetry") } cfg := prometheus.NewConfig( prometheus.WriteURLOption(cfgRepo.Url), @@ -44,7 +45,7 @@ func (svc SinkerService) remoteWriteToPrometheus(tsList prometheus.TSList, owner var headers = make(map[string]string) headers["Authorization"] = svc.encodeBase64(cfgRepo.User, cfgRepo.Password) - result, writeErr := promClient.WriteTimeSeries(context.Background(), tsList, prometheus.WriteOptions{Headers: headers}) + result, writeErr := promClient.WriteTimeSeries(ctx, tsList, prometheus.WriteOptions{Headers: headers}) if err := error(writeErr); err != nil { if cfgRepo.State != config.Error || cfgRepo.Msg != fmt.Sprint(err) { cfgRepo.State = config.Error @@ -128,7 +129,7 @@ func (svc SinkerService) handleMetrics(ctx context.Context, agentID string, chan MFOwnerID: agentPb.OwnerID, MFThingID: agentID, MFChannelID: channelID, - OrbTags: agentPb.OrbTags, + OrbTags: (*types.Tags)(&agentPb.OrbTags), AgentTags: agentPb.AgentTags, } diff --git a/sinker/otel/bridgeservice/bridge.go b/sinker/otel/bridgeservice/bridge.go index 0582acfc7..f30bcc1a0 100644 --- a/sinker/otel/bridgeservice/bridge.go +++ b/sinker/otel/bridgeservice/bridge.go @@ -37,26 +37,51 @@ type SinkerOtelBridgeService struct { fleetClient fleetpb.FleetServiceClient } -func (bs *SinkerOtelBridgeService) NotifyActiveSink(_ context.Context, mfOwnerId, sinkId, newState, message string) error { +func (bs *SinkerOtelBridgeService) NotifyActiveSink(ctx context.Context, mfOwnerId, sinkId, newState, message string) error { cfgRepo, err := bs.sinkerCache.Get(mfOwnerId, sinkId) if err != nil { bs.logger.Error("unable to retrieve the sink config", zap.Error(err)) return err } - err = cfgRepo.State.SetFromString(newState) - if err != nil { - bs.logger.Error("unable to set state", zap.String("new_state", newState), zap.Error(err)) - return err - } - if cfgRepo.State == config.Error { - cfgRepo.Msg = message - } else if cfgRepo.State == config.Active { + + // only updates sink state if 
status Idle or Unknown + if cfgRepo.State == config.Idle || cfgRepo.State == config.Unknown { cfgRepo.LastRemoteWrite = time.Now() - } - err = bs.sinkerCache.Edit(cfgRepo) - if err != nil { - bs.logger.Error("error during update sink cache", zap.String("sinkId", sinkId), zap.Error(err)) - return err + // only deploy collector if new state is "active" and current state "not active" + if newState == "active" && cfgRepo.State != config.Active { + err = cfgRepo.State.SetFromString(newState) + if err != nil { + bs.logger.Error("unable to set state", zap.String("new_state", newState), zap.Error(err)) + return err + } + err = bs.sinkerCache.AddActivity(mfOwnerId, sinkId) + if err != nil { + bs.logger.Error("error during update last remote write", zap.String("sinkId", sinkId), zap.Error(err)) + return err + } + err = bs.sinkerCache.DeployCollector(ctx, cfgRepo) + if err != nil { + bs.logger.Error("error during update sink cache", zap.String("sinkId", sinkId), zap.Error(err)) + return err + } + bs.logger.Info("waking up sink to active", zap.String("sinkID", sinkId), zap.String("newState", newState), zap.Any("currentState", cfgRepo.State)) + } else { + err = bs.sinkerCache.AddActivity(mfOwnerId, sinkId) + if err != nil { + bs.logger.Error("error during update last remote write", zap.String("sinkId", sinkId), zap.Error(err)) + return err + } + bs.logger.Info("registering sink activity", zap.String("sinkID", sinkId), zap.String("newState", newState), zap.Any("currentState", cfgRepo.State)) + } + } else if cfgRepo.State == config.Active { + err = bs.sinkerCache.AddActivity(mfOwnerId, sinkId) + if err != nil { + bs.logger.Error("error during update last remote write", zap.String("sinkId", sinkId), zap.Error(err)) + return err + } + bs.logger.Info("registering sink activity", zap.String("sinkID", sinkId), zap.String("newState", newState), zap.Any("currentState", cfgRepo.State)) + } else if cfgRepo.State == config.Error { + cfgRepo.Msg = message } return nil diff --git a/sinker/redis/consumer/streams.go b/sinker/redis/consumer/streams.go index e6dd0dc7a..74cb36f9d 100644 --- a/sinker/redis/consumer/streams.go +++ b/sinker/redis/consumer/streams.go @@ -28,6 +28,7 @@ type Subscriber interface { } type eventStore struct { + otelEnabled bool sinkerService sinker.Service configRepo config.ConfigRepo client *redis.Client @@ -36,14 +37,18 @@ type eventStore struct { } func (es eventStore) Subscribe(context context.Context) error { - err := es.client.XGroupCreateMkStream(context, stream, group, "$").Err() + subGroup := group + if es.otelEnabled { + subGroup = group + ".otel" + } + err := es.client.XGroupCreateMkStream(context, stream, subGroup, "$").Err() if err != nil && err.Error() != exists { return err } for { streams, err := es.client.XReadGroup(context, &redis.XReadGroupArgs{ - Group: group, + Group: subGroup, Consumer: es.esconsumer, Streams: []string{stream, ">"}, Count: 100, @@ -68,7 +73,7 @@ func (es eventStore) Subscribe(context context.Context) error { es.logger.Error("Failed to handle event", zap.String("operation", event["operation"].(string)), zap.Error(err)) break } - es.client.XAck(context, stream, group, msg.ID) + es.client.XAck(context, stream, subGroup, msg.ID) case sinksUpdate: rte, derr := decodeSinksUpdate(event) if derr != nil { @@ -86,9 +91,9 @@ func (es eventStore) Subscribe(context context.Context) error { } if err != nil { es.logger.Error("Failed to handle event", zap.String("operation", event["operation"].(string)), zap.Error(err)) - break + continue } - es.client.XAck(context, 
stream, group, msg.ID) + es.client.XAck(context, stream, subGroup, msg.ID) } } } @@ -138,11 +143,6 @@ func decodeSinksRemove(event map[string]interface{}) (updateSinkEvent, error) { owner: read(event, "owner", ""), timestamp: time.Time{}, } - var metadata types.Metadata - if err := json.Unmarshal([]byte(read(event, "config", "")), &metadata); err != nil { - return updateSinkEvent{}, err - } - val.config = metadata return val, nil } diff --git a/sinker/redis/producer/streams.go b/sinker/redis/producer/streams.go index 3b88e5cc8..b21399f43 100644 --- a/sinker/redis/producer/streams.go +++ b/sinker/redis/producer/streams.go @@ -10,8 +10,7 @@ import ( ) const ( - streamID = "orb.sinker" - streamLen = 1000 + streamID = "orb.sinker" ) var _ config.ConfigRepo = (*eventStore)(nil) @@ -22,6 +21,32 @@ type eventStore struct { logger *zap.Logger } +// DeployCollector only used in maestro +func (e eventStore) DeployCollector(ctx context.Context, config config.SinkConfig) error { + err := e.sinkCache.Edit(config) + if err != nil { + return err + } + + eventToSink := SinkerUpdateEvent{ + SinkID: config.SinkID, + Owner: config.OwnerID, + State: config.State.String(), + Msg: config.Msg, + Timestamp: time.Now(), + } + recordToSink := &redis.XAddArgs{ + Stream: streamID, + Values: eventToSink.Encode(), + } + err = e.client.XAdd(ctx, recordToSink).Err() + if err != nil { + e.logger.Error("error sending event to sinker event store", zap.Error(err)) + } + + return nil +} + func (e eventStore) Exists(ownerID string, sinkID string) bool { return e.sinkCache.Exists(ownerID, sinkID) } @@ -40,9 +65,8 @@ func (e eventStore) Add(config config.SinkConfig) error { Timestamp: time.Now(), } record := &redis.XAddArgs{ - Stream: streamID, - MaxLenApprox: streamLen, - Values: event.Encode(), + Stream: streamID, + Values: event.Encode(), } err = e.client.XAdd(context.Background(), record).Err() if err != nil { @@ -64,9 +88,8 @@ func (e eventStore) Remove(ownerID string, sinkID string) error { Timestamp: time.Now(), } record := &redis.XAddArgs{ - Stream: streamID, - MaxLenApprox: streamLen, - Values: event.Encode(), + Stream: streamID, + Values: event.Encode(), } err = e.client.XAdd(context.Background(), record).Err() if err != nil { @@ -93,9 +116,8 @@ func (e eventStore) Edit(config config.SinkConfig) error { Timestamp: time.Now(), } record := &redis.XAddArgs{ - Stream: streamID, - MaxLenApprox: streamLen, - Values: event.Encode(), + Stream: streamID, + Values: event.Encode(), } err = e.client.XAdd(context.Background(), record).Err() if err != nil { @@ -104,6 +126,14 @@ func (e eventStore) Edit(config config.SinkConfig) error { return nil } +func (e eventStore) GetActivity(ownerID string, sinkID string) (int64, error) { + return e.sinkCache.GetActivity(ownerID, sinkID) +} + +func (e eventStore) AddActivity(ownerID string, sinkID string) error { + return e.sinkCache.AddActivity(ownerID, sinkID) +} + func (e eventStore) GetAll(ownerID string) ([]config.SinkConfig, error) { return e.sinkCache.GetAll(ownerID) } diff --git a/sinker/redis/sinker.go b/sinker/redis/sinker.go index 86de8cd13..8572218ca 100644 --- a/sinker/redis/sinker.go +++ b/sinker/redis/sinker.go @@ -3,28 +3,34 @@ package redis import ( "context" "encoding/json" + "errors" "fmt" + "strconv" "strings" + "time" + + "github.com/ns1labs/orb/sinker/redis/producer" "github.com/go-redis/redis/v8" "github.com/ns1labs/orb/sinker" - "github.com/ns1labs/orb/sinker/config" + sinkerconfig "github.com/ns1labs/orb/sinker/config" "go.uber.org/zap" ) const ( - keyPrefix = 
"sinker_key" - idPrefix = "sinker" + keyPrefix = "sinker_key" + activityPrefix = "sinker_activity" + idPrefix = "orb.maestro" ) -var _ config.ConfigRepo = (*sinkerCache)(nil) +var _ sinkerconfig.ConfigRepo = (*sinkerCache)(nil) type sinkerCache struct { client *redis.Client logger *zap.Logger } -func NewSinkerCache(client *redis.Client, logger *zap.Logger) config.ConfigRepo { +func NewSinkerCache(client *redis.Client, logger *zap.Logger) sinkerconfig.ConfigRepo { return &sinkerCache{client: client, logger: logger} } @@ -39,7 +45,7 @@ func (s *sinkerCache) Exists(ownerID string, sinkID string) bool { return false } -func (s *sinkerCache) Add(config config.SinkConfig) error { +func (s *sinkerCache) Add(config sinkerconfig.SinkConfig) error { skey := fmt.Sprintf("%s-%s:%s", keyPrefix, config.OwnerID, config.SinkID) bytes, err := json.Marshal(config) if err != nil { @@ -59,23 +65,23 @@ func (s *sinkerCache) Remove(ownerID string, sinkID string) error { return nil } -func (s *sinkerCache) Get(ownerID string, sinkID string) (config.SinkConfig, error) { +func (s *sinkerCache) Get(ownerID string, sinkID string) (sinkerconfig.SinkConfig, error) { if ownerID == "" || sinkID == "" { - return config.SinkConfig{}, sinker.ErrNotFound + return sinkerconfig.SinkConfig{}, sinker.ErrNotFound } skey := fmt.Sprintf("%s-%s:%s", keyPrefix, ownerID, sinkID) cachedConfig, err := s.client.Get(context.Background(), skey).Result() if err != nil { - return config.SinkConfig{}, err + return sinkerconfig.SinkConfig{}, err } - var cfgSinker config.SinkConfig + var cfgSinker sinkerconfig.SinkConfig if err := json.Unmarshal([]byte(cachedConfig), &cfgSinker); err != nil { - return config.SinkConfig{}, err + return sinkerconfig.SinkConfig{}, err } return cfgSinker, nil } -func (s *sinkerCache) Edit(config config.SinkConfig) error { +func (s *sinkerCache) Edit(config sinkerconfig.SinkConfig) error { if err := s.Remove(config.OwnerID, config.SinkID); err != nil { return err } @@ -85,6 +91,55 @@ func (s *sinkerCache) Edit(config config.SinkConfig) error { return nil } +// check collector activity + +func (s *sinkerCache) GetActivity(ownerID string, sinkID string) (int64, error) { + if ownerID == "" || sinkID == "" { + return 0, errors.New("invalid parameters") + } + skey := fmt.Sprintf("%s:%s", activityPrefix, sinkID) + secs, err := s.client.Get(context.Background(), skey).Result() + if err != nil { + return 0, err + } + lastActivity, _ := strconv.ParseInt(secs, 10, 64) + return lastActivity, nil +} + +func (s *sinkerCache) AddActivity(ownerID string, sinkID string) error { + if ownerID == "" || sinkID == "" { + return errors.New("invalid parameters") + } + skey := fmt.Sprintf("%s:%s", activityPrefix, sinkID) + lastActivity := strconv.FormatInt(time.Now().Unix(), 10) + if err := s.client.Set(context.Background(), skey, lastActivity, 0).Err(); err != nil { + return err + } + s.logger.Info("added activity for owner and sink ids", zap.String("owner", ownerID), zap.String("sinkID", sinkID)) + return nil +} + +// + +func (s *sinkerCache) DeployCollector(ctx context.Context, config sinkerconfig.SinkConfig) error { + event := producer.SinkerUpdateEvent{ + SinkID: config.SinkID, + Owner: config.OwnerID, + State: config.State.String(), + Msg: config.Msg, + Timestamp: time.Now(), + } + encodeEvent := redis.XAddArgs{ + ID: config.SinkID, + Stream: idPrefix, + Values: event, + } + if cmd := s.client.XAdd(ctx, &encodeEvent); cmd.Err() != nil { + return cmd.Err() + } + return nil +} + func (s *sinkerCache) GetAllOwners() ([]string, error) { 
iter := s.client.Scan(context.Background(), 0, fmt.Sprintf("%s-*", keyPrefix), 0).Iterator() var owners []string @@ -101,9 +156,9 @@ func (s *sinkerCache) GetAllOwners() ([]string, error) { return owners, nil } -func (s *sinkerCache) GetAll(ownerID string) ([]config.SinkConfig, error) { +func (s *sinkerCache) GetAll(ownerID string) ([]sinkerconfig.SinkConfig, error) { iter := s.client.Scan(context.Background(), 0, fmt.Sprintf("%s-%s:*", keyPrefix, ownerID), 0).Iterator() - var configs []config.SinkConfig + var configs []sinkerconfig.SinkConfig for iter.Next(context.Background()) { keys := strings.Split(strings.TrimPrefix(iter.Val(), fmt.Sprintf("%s-", keyPrefix)), ":") sinkID := "" diff --git a/sinks/api/grpc/client.go b/sinks/api/grpc/client.go index 9db58dfec..89d0023e2 100644 --- a/sinks/api/grpc/client.go +++ b/sinks/api/grpc/client.go @@ -47,6 +47,7 @@ func (client grpcClient) RetrieveSinks(ctx context.Context, in *pb.SinksFilterRe for i, sinkResponse := range ir.sinks { sinkList[i] = &pb.SinkRes{ Id: sinkResponse.id, + OwnerID: sinkResponse.mfOwnerId, Name: sinkResponse.name, Description: sinkResponse.description, Tags: sinkResponse.tags, @@ -122,6 +123,7 @@ func decodeSinksResponse(_ context.Context, grpcRes interface{}) (interface{}, e for i, sink := range res.Sinks { sinkList[i] = sinkRes{ id: sink.Id, + mfOwnerId: sink.OwnerID, name: sink.Name, description: sink.Description, tags: sink.Tags, diff --git a/sinks/api/grpc/endpoint.go b/sinks/api/grpc/endpoint.go index c1ce91fe6..74838e9ec 100644 --- a/sinks/api/grpc/endpoint.go +++ b/sinks/api/grpc/endpoint.go @@ -50,6 +50,7 @@ func retrieveSinksEndpoint(svc sinks.SinkService) endpoint.Endpoint { req := request.(sinksFilter) filter := sinks.Filter{ OpenTelemetry: req.isOtel, + StateFilter: req.state, } sinksInternal, err := svc.ListSinksInternal(ctx, filter) if err != nil { @@ -81,6 +82,7 @@ func buildSinkResponse(sink sinks.Sink) (sinkRes, error) { return sinkRes{ id: sink.ID, + mfOwnerId: sink.MFOwnerID, name: sink.Name.String(), description: *sink.Description, tags: tagData, diff --git a/sinks/api/grpc/request.go b/sinks/api/grpc/request.go index 38b031fe3..d2dc11626 100644 --- a/sinks/api/grpc/request.go +++ b/sinks/api/grpc/request.go @@ -19,6 +19,7 @@ type accessByIDReq struct { type sinksFilter struct { isOtel string + state string } func (req accessByIDReq) validate() error { diff --git a/sinks/api/grpc/response.go b/sinks/api/grpc/response.go index a3a8d01ea..1aee5e064 100644 --- a/sinks/api/grpc/response.go +++ b/sinks/api/grpc/response.go @@ -10,6 +10,7 @@ package grpc type sinkRes struct { id string + mfOwnerId string name string description string tags []byte diff --git a/sinks/api/grpc/server.go b/sinks/api/grpc/server.go index 82d913cd5..d7972ba96 100644 --- a/sinks/api/grpc/server.go +++ b/sinks/api/grpc/server.go @@ -75,6 +75,7 @@ func encodeSinksResponse(_ context.Context, grpcRes interface{}) (interface{}, e for i, sink := range res.sinks { sList[i] = &pb.SinkRes{ Id: sink.id, + OwnerID: sink.mfOwnerId, Name: sink.name, Description: sink.description, Tags: sink.tags, diff --git a/sinks/api/http/endpoint_test.go b/sinks/api/http/endpoint_test.go index fbed5b9b9..6af7deeda 100644 --- a/sinks/api/http/endpoint_test.go +++ b/sinks/api/http/endpoint_test.go @@ -34,8 +34,8 @@ const ( token = "token" invalidToken = "invalid" email = "user@example.com" - validJson = "{\n \"name\": \"my-prom-sink\",\n \"backend\": \"prometheus\",\n \"config\": {\n \"remote_host\": \"my.prometheus-host.com\",\n \"username\": \"dbuser\"\n 
},\n \"description\": \"An example prometheus sink\",\n \"tags\": {\n \"cloud\": \"aws\"\n },\n \"validate_only\": false\n}" - conflictValidJson = "{\n \"name\": \"conflict\",\n \"backend\": \"prometheus\",\n \"config\": {\n \"remote_host\": \"my.prometheus-host.com\",\n \"username\": \"dbuser\"\n },\n \"description\": \"An example prometheus sink\",\n \"tags\": {\n \"cloud\": \"aws\"\n },\n \"validate_only\": false\n}" + validJson = "{\n \"name\": \"my-prom-sink\",\n \"backend\": \"prometheus\",\n \"config\": {\n \"remote_host\": \"https://orb.community/\",\n \"username\": \"dbuser\"\n },\n \"description\": \"An example prometheus sink\",\n \"tags\": {\n \"cloud\": \"aws\"\n },\n \"validate_only\": false\n}" + conflictValidJson = "{\n \"name\": \"conflict\",\n \"backend\": \"prometheus\",\n \"config\": {\n \"remote_host\": \"https://orb.community/\",\n \"username\": \"dbuser\"\n },\n \"description\": \"An example prometheus sink\",\n \"tags\": {\n \"cloud\": \"aws\"\n },\n \"validate_only\": false\n}" invalidJson = "{" ) @@ -46,7 +46,7 @@ var ( Name: nameID, Description: &description, Backend: "prometheus", - Config: map[string]interface{}{"remote_host": "data", "username": "dbuser"}, + Config: map[string]interface{}{"remote_host": "https://orb.community/", "username": "dbuser"}, Tags: map[string]string{"cloud": "aws"}, } invalidName = strings.Repeat("m", maxNameSize+1) @@ -123,7 +123,7 @@ func TestCreateSinks(t *testing.T) { Backend: "prometheus", Config: types.Metadata{ "username": "test", - "remote_host": "my.prometheus-host.com", + "remote_host": "https://orb.community/", "description": "An example prometheus sink", }, Tags: map[string]string{ @@ -136,7 +136,7 @@ func TestCreateSinks(t *testing.T) { Backend: "prometheus", Config: types.Metadata{ "username": "test", - "remote_host": "my.prometheus-host.com", + "remote_host": "https://orb.community/", "description": "An example prometheus sink", }, Tags: map[string]string{ @@ -432,7 +432,7 @@ func TestListSinks(t *testing.T) { snk := sinks.Sink{ Name: skName, Backend: "prometheus", - Config: map[string]interface{}{"remote_host": "data", "username": "dbuser"}, + Config: map[string]interface{}{"remote_host": "https://orb.community/", "username": "dbuser"}, Tags: map[string]string{"cloud": "aws"}, } @@ -902,10 +902,10 @@ func TestValidateSink(t *testing.T) { server := newServer(service) defer server.Close() - var invalidSinkField = "{\n \"namee\": \"my-prom-sink\",\n \"backend\": \"prometheus\",\n \"config\": {\n \"remote_host\": \"my.prometheus-host.com\",\n \"username\": \"dbuser\"\n },\n \"description\": \"An example prometheus sink\",\n \"tags\": {\n \"cloud\": \"aws\"\n }}" - var invalidSinkValueName = "{\n \"name\": \"my...SINK1\",\n \"backend\": \"prometheus\",\n \"config\": {\n \"remote_host\": \"my.prometheus-host.com\",\n \"username\": \"dbuser\"\n },\n \"description\": \"An example prometheus sink\",\n \"tags\": {\n \"cloud\": \"aws\"\n }}" - var invalidSinkValueBackend = "{\n \"name\": \"my-prom-sink\",\n \"backend\": \"invalidBackend\",\n \"config\": {\n \"remote_host\": \"my.prometheus-host.com\",\n \"username\": \"dbuser\"\n },\n \"description\": \"An example prometheus sink\",\n \"tags\": {\n \"cloud\": \"aws\"\n }}" - var invalidSinkValueTag = "{\n \"name\": \"my-prom-sink\",\n \"backend\": \"prometheus\",\n \"config\": {\n \"remote_host\": \"my.prometheus-host.com\",\n \"username\": \"dbuser\"\n },\n \"description\": \"An example prometheus sink\",\n \"tags\": \"invalidTag\"}" + var invalidSinkField = "{\n \"namee\": 
\"my-prom-sink\",\n \"backend\": \"prometheus\",\n \"config\": {\n \"remote_host\": \"https://orb.community/\",\n \"username\": \"dbuser\"\n },\n \"description\": \"An example prometheus sink\",\n \"tags\": {\n \"cloud\": \"aws\"\n }}" + var invalidSinkValueName = "{\n \"name\": \"my...SINK1\",\n \"backend\": \"prometheus\",\n \"config\": {\n \"remote_host\": \"https://orb.community/\",\n \"username\": \"dbuser\"\n },\n \"description\": \"An example prometheus sink\",\n \"tags\": {\n \"cloud\": \"aws\"\n }}" + var invalidSinkValueBackend = "{\n \"name\": \"my-prom-sink\",\n \"backend\": \"invalidBackend\",\n \"config\": {\n \"remote_host\": \"https://orb.community/\",\n \"username\": \"dbuser\"\n },\n \"description\": \"An example prometheus sink\",\n \"tags\": {\n \"cloud\": \"aws\"\n }}" + var invalidSinkValueTag = "{\n \"name\": \"my-prom-sink\",\n \"backend\": \"prometheus\",\n \"config\": {\n \"remote_host\": \"https://orb.community/\",\n \"username\": \"dbuser\"\n },\n \"description\": \"An example prometheus sink\",\n \"tags\": \"invalidTag\"}" cases := map[string]struct { req string diff --git a/sinks/pb/sinks.pb.go b/sinks/pb/sinks.pb.go index 1f3372309..433430cad 100644 --- a/sinks/pb/sinks.pb.go +++ b/sinks/pb/sinks.pb.go @@ -73,6 +73,7 @@ type SinksFilterReq struct { unknownFields protoimpl.UnknownFields OtelEnabled string `protobuf:"bytes,1,opt,name=otelEnabled,proto3" json:"otelEnabled,omitempty"` + State string `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"` } func (x *SinksFilterReq) Reset() { @@ -114,6 +115,13 @@ func (x *SinksFilterReq) GetOtelEnabled() string { return "" } +func (x *SinksFilterReq) GetState() string { + if x != nil { + return x.State + } + return "" +} + type SinkByIDReq struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -182,6 +190,7 @@ type SinkRes struct { Error string `protobuf:"bytes,6,opt,name=error,proto3" json:"error,omitempty"` Backend string `protobuf:"bytes,7,opt,name=backend,proto3" json:"backend,omitempty"` Config []byte `protobuf:"bytes,8,opt,name=config,proto3" json:"config,omitempty"` + OwnerID string `protobuf:"bytes,9,opt,name=ownerID,proto3" json:"ownerID,omitempty"` } func (x *SinkRes) Reset() { @@ -272,6 +281,13 @@ func (x *SinkRes) GetConfig() []byte { return nil } +func (x *SinkRes) GetOwnerID() string { + if x != nil { + return x.OwnerID + } + return "" +} + var File_sinks_pb_sinks_proto protoreflect.FileDescriptor var file_sinks_pb_sinks_proto_rawDesc = []byte{ @@ -280,26 +296,29 @@ var file_sinks_pb_sinks_proto_rawDesc = []byte{ 0x08, 0x53, 0x69, 0x6e, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x12, 0x24, 0x0a, 0x05, 0x73, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x73, 0x69, 0x6e, 0x6b, 0x73, 0x2e, 0x53, 0x69, 0x6e, 0x6b, 0x52, 0x65, 0x73, 0x52, 0x05, 0x73, 0x69, 0x6e, 0x6b, 0x73, 0x22, - 0x32, 0x0a, 0x0e, 0x53, 0x69, 0x6e, 0x6b, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x65, + 0x48, 0x0a, 0x0e, 0x53, 0x69, 0x6e, 0x6b, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x12, 0x20, 0x0a, 0x0b, 0x6f, 0x74, 0x65, 0x6c, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x74, 0x65, 0x6c, 0x45, 0x6e, 0x61, 0x62, - 0x6c, 0x65, 0x64, 0x22, 0x3f, 0x0a, 0x0b, 0x53, 0x69, 0x6e, 0x6b, 0x42, 0x79, 0x49, 0x44, 0x52, - 0x65, 0x71, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x69, 0x6e, 0x6b, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x06, 0x73, 0x69, 0x6e, 0x6b, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x77, - 0x6e, 
0x65, 0x72, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x77, 0x6e, - 0x65, 0x72, 0x49, 0x44, 0x22, 0xc1, 0x01, 0x0a, 0x07, 0x53, 0x69, 0x6e, 0x6b, 0x52, 0x65, 0x73, - 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, - 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, - 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, - 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, - 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, - 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x32, 0x7e, 0x0a, 0x0b, 0x53, 0x69, 0x6e, 0x6b, + 0x6c, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x22, 0x3f, 0x0a, 0x0b, 0x53, 0x69, 0x6e, + 0x6b, 0x42, 0x79, 0x49, 0x44, 0x52, 0x65, 0x71, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x69, 0x6e, 0x6b, + 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x69, 0x6e, 0x6b, 0x49, 0x44, + 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x22, 0xdb, 0x01, 0x0a, 0x07, 0x53, + 0x69, 0x6e, 0x6b, 0x52, 0x65, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, + 0x74, 0x61, 0x67, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, + 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x18, 0x0a, 0x07, + 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x62, + 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, + 0x0a, 0x07, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x32, 0x7e, 0x0a, 0x0b, 0x53, 0x69, 0x6e, 0x6b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x34, 0x0a, 0x0c, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x53, 0x69, 0x6e, 0x6b, 0x12, 0x12, 0x2e, 0x73, 0x69, 0x6e, 0x6b, 0x73, 0x2e, 0x53, 0x69, 0x6e, 0x6b, 0x42, 0x79, 
0x49, 0x44, 0x52, 0x65, 0x71, 0x1a, 0x0e, 0x2e, 0x73, 0x69, diff --git a/sinks/pb/sinks.proto b/sinks/pb/sinks.proto index 1ecc1ff66..5286a163f 100644 --- a/sinks/pb/sinks.proto +++ b/sinks/pb/sinks.proto @@ -14,6 +14,7 @@ message SinksRes { message SinksFilterReq { string otelEnabled = 1; + string state = 2; } message SinkByIDReq { @@ -30,4 +31,5 @@ message SinkRes { string error = 6; string backend = 7; bytes config = 8; + string ownerID = 9; } diff --git a/sinks/postgres/sinks.go b/sinks/postgres/sinks.go index 129f4ddb3..f44deb3f1 100644 --- a/sinks/postgres/sinks.go +++ b/sinks/postgres/sinks.go @@ -14,6 +14,7 @@ import ( "encoding/json" "fmt" "github.com/gofrs/uuid" + "github.com/jmoiron/sqlx" "github.com/lib/pq" "github.com/ns1labs/orb/pkg/db" "github.com/ns1labs/orb/pkg/errors" @@ -43,7 +44,12 @@ func (s sinksRepository) SearchAllSinks(ctx context.Context, filter sinks.Filter if err != nil { return nil, errors.Wrap(errors.ErrSelectEntity, err) } - defer rows.Close() + defer func(rows *sqlx.Rows) { + err := rows.Close() + if err != nil { + s.logger.Error("error closing rows", zap.Error(err)) + } + }(rows) items := make([]sinks.Sink, 0) for rows.Next() { diff --git a/sinks/redis/consumer/streams.go b/sinks/redis/consumer/streams.go index 68baea35b..97772dd07 100644 --- a/sinks/redis/consumer/streams.go +++ b/sinks/redis/consumer/streams.go @@ -2,10 +2,11 @@ package consumer import ( "context" + "time" + "github.com/go-redis/redis/v8" "github.com/ns1labs/orb/sinks" "go.uber.org/zap" - "time" ) const ( @@ -15,8 +16,6 @@ const ( sinkerPrefix = "sinker." sinkerUpdate = sinkerPrefix + "update" - otelYamlPrefix = "otel.yaml.sinker." - exists = "BUSYGROUP Consumer Group name already exists" ) @@ -59,11 +58,11 @@ func (es eventStore) Subscribe(context context.Context) error { for _, msg := range streams[0].Messages { event := msg.Values - + es.logger.Info("received message in sinker event bus", zap.Any("operation", event["operation"])) var err error switch event["operation"] { case sinkerUpdate: - rte := decodeSinkerStateUpdate(event) + rte := es.decodeSinkerStateUpdate(event) err = es.handleSinkerStateUpdate(context, rte) } if err != nil { @@ -83,14 +82,18 @@ func (es eventStore) handleSinkerStateUpdate(ctx context.Context, event stateUpd return nil } -func decodeSinkerStateUpdate(event map[string]interface{}) stateUpdateEvent { +func (es eventStore) decodeSinkerStateUpdate(event map[string]interface{}) stateUpdateEvent { val := stateUpdateEvent{ ownerID: read(event, "owner", ""), sinkID: read(event, "sink_id", ""), msg: read(event, "msg", ""), timestamp: time.Time{}, } - val.state.Scan(event["state"]) + err := val.state.Scan(event["state"]) + if err != nil { + es.logger.Error("error parsing the state", zap.Error(err)) + return stateUpdateEvent{} + } return val } diff --git a/sinks/redis/producer/streams.go b/sinks/redis/producer/streams.go index fd465de55..7d556bdd0 100644 --- a/sinks/redis/producer/streams.go +++ b/sinks/redis/producer/streams.go @@ -45,7 +45,6 @@ func (es eventStore) ViewSinkInternal(ctx context.Context, ownerID string, key s func (es eventStore) CreateSink(ctx context.Context, token string, s sinks.Sink) (sink sinks.Sink, err error) { defer func() { - event := createSinkEvent{ sinkID: sink.ID, owner: sink.MFOwnerID, @@ -58,15 +57,16 @@ func (es eventStore) CreateSink(ctx context.Context, token string, s sinks.Sink) } record := &redis.XAddArgs{ - Stream: streamID, - MaxLenApprox: streamLen, - Values: encode, + Stream: streamID, + MaxLen: streamLen, + Values: encode, } 
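The producer hunk above moves the sinks event store from the deprecated MaxLenApprox field to MaxLen, so each XADD keeps the stream capped at streamLen entries. A minimal sketch of a capped XADD with go-redis v8; the stream name, cap, and payload fields here are placeholders standing in for the streamID/streamLen constants and the encoded event:

```go
package main

import (
	"context"
	"time"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// Placeholder stream name and cap; the real values come from the
	// streamID/streamLen constants defined earlier in the producer file.
	const (
		streamID  = "orb.sinks.example"
		streamLen = 1000
	)

	record := &redis.XAddArgs{
		Stream: streamID,
		MaxLen: streamLen, // hard cap; MaxLenApprox was the approximate ("~") variant
		Values: map[string]interface{}{
			"operation": "sinks.create",
			"sink_id":   "example-sink-id", // illustrative payload
			"timestamp": time.Now().Unix(),
		},
	}
	if err := client.XAdd(ctx, record).Err(); err != nil {
		panic(err)
	}
}
```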
err = es.client.XAdd(ctx, record).Err() if err != nil { - es.logger.Error("error sending event to event store", zap.Error(err)) + es.logger.Error("error sending event to sinks event store", zap.Error(err)) } + }() return es.svc.CreateSink(ctx, token, s) @@ -86,17 +86,16 @@ func (es eventStore) UpdateSink(ctx context.Context, token string, s sinks.Sink) } record := &redis.XAddArgs{ - Stream: streamID, - MaxLenApprox: streamLen, - Values: encode, + Stream: streamID, + MaxLen: streamLen, + Values: encode, } err = es.client.XAdd(ctx, record).Err() if err != nil { - es.logger.Error("error sending event to event store", zap.Error(err)) + es.logger.Error("error sending event to sinks event store", zap.Error(err)) } }() - return es.svc.UpdateSink(ctx, token, s) } @@ -137,14 +136,14 @@ func (es eventStore) DeleteSink(ctx context.Context, token, id string) (err erro } record := &redis.XAddArgs{ - Stream: streamID, - MaxLenApprox: streamLen, - Values: encode, + Stream: streamID, + MaxLen: streamLen, + Values: encode, } err = es.client.XAdd(ctx, record).Err() if err != nil { - es.logger.Error("error sending event to event store", zap.Error(err)) + es.logger.Error("error sending event to sinks event store", zap.Error(err)) return err } return nil diff --git a/sinks/sinks_service.go b/sinks/sinks_service.go index 9a625bdc3..7cf00200e 100644 --- a/sinks/sinks_service.go +++ b/sinks/sinks_service.go @@ -11,7 +11,9 @@ package sinks import ( "context" "github.com/ns1labs/orb/pkg/errors" + "github.com/ns1labs/orb/pkg/types" "github.com/ns1labs/orb/sinks/backend" + "net/url" ) var ( @@ -35,12 +37,23 @@ func (svc sinkService) CreateSink(ctx context.Context, token string, sink Sink) return Sink{}, err } + // Validate remote_host + _, err = url.ParseRequestURI(sink.Config["remote_host"].(string)) + if err != nil { + return Sink{}, errors.Wrap(errors.New("invalid remote url"), err) + } + // encrypt data for the password sink, err = svc.encryptMetadata(sink) if err != nil { return Sink{}, errors.Wrap(ErrCreateSink, err) } + //// add default values + defaultMetadata := make(types.Metadata, 1) + defaultMetadata["opentelemetry"] = "enabled" + sink.Config.Merge(defaultMetadata) + id, err := svc.sinkRepo.Save(ctx, sink) if err != nil { return Sink{}, errors.Wrap(ErrCreateSink, err) @@ -97,6 +110,15 @@ func (svc sinkService) UpdateSink(ctx context.Context, token string, sink Sink) if sink.Config == nil { sink.Config = currentSink.Config + } else { + // Validate remote_host + _, err := url.ParseRequestURI(sink.Config["remote_host"].(string)) + if err != nil { + return Sink{}, errors.Wrap(ErrUpdateEntity, err) + } + // This will keep the previous tags + currentSink.Config.Merge(sink.Config) + sink.Config = currentSink.Config } if sink.Tags == nil { @@ -125,11 +147,11 @@ func (svc sinkService) UpdateSink(ctx context.Context, token string, sink Sink) } sinkEdited, err := svc.sinkRepo.RetrieveById(ctx, sink.ID) if err != nil { - return Sink{}, err + return Sink{}, errors.Wrap(ErrUpdateEntity, err) } sinkEdited, err = svc.decryptMetadata(sinkEdited) if err != nil { - return Sink{}, err + return Sink{}, errors.Wrap(ErrUpdateEntity, err) } return sinkEdited, nil diff --git a/sinks/sinks_service_test.go b/sinks/sinks_service_test.go index 7a23852cb..9bdf9ec64 100644 --- a/sinks/sinks_service_test.go +++ b/sinks/sinks_service_test.go @@ -41,7 +41,7 @@ var ( Backend: "prometheus", State: sinks.Unknown, Error: "", - Config: map[string]interface{}{"remote_host": "data", "username": "dbuser"}, + Config: 
map[string]interface{}{"remote_host": "https://orb.community/", "username": "dbuser"}, Tags: map[string]string{"cloud": "aws"}, } wrongID, _ = uuid.NewV4() @@ -131,7 +131,7 @@ func TestUpdateSink(t *testing.T) { Backend: "prometheus", State: sinks.Unknown, Error: "", - Config: map[string]interface{}{"remote_host": "data", "username": "dbuser"}, + Config: map[string]interface{}{"remote_host": "https://orb.community/", "username": "dbuser"}, Tags: map[string]string{"cloud": "aws"}, }) require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) @@ -143,7 +143,7 @@ func TestUpdateSink(t *testing.T) { Backend: "prometheus", State: sinks.Unknown, Error: "", - Config: map[string]interface{}{"remote_host": "data", "username": "dbuser"}, + Config: map[string]interface{}{"remote_host": "https://orb.community/", "username": "dbuser"}, Tags: map[string]string{"cloud": "aws"}, }) require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) @@ -155,7 +155,7 @@ func TestUpdateSink(t *testing.T) { Backend: "prometheus", State: sinks.Unknown, Error: "", - Config: map[string]interface{}{"remote_host": "data", "username": "dbuser"}, + Config: map[string]interface{}{"remote_host": "https://orb.community/", "username": "dbuser"}, Tags: map[string]string{"cloud": "aws"}, }) require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) @@ -167,7 +167,7 @@ func TestUpdateSink(t *testing.T) { Backend: "prometheus", State: sinks.Unknown, Error: "", - Config: map[string]interface{}{"remote_host": "data", "username": "dbuser"}, + Config: map[string]interface{}{"remote_host": "https://orb.community/", "username": "dbuser"}, Tags: map[string]string{"cloud": "aws"}, }) require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) @@ -203,14 +203,14 @@ func TestUpdateSink(t *testing.T) { incomingSink: sinks.Sink{ ID: sinkTestConfigAttribute.ID, Config: types.Metadata{ - "test": "config", + "remote_host": "https://orb.community/", }, Error: "", }, expectedSink: sinks.Sink{ Name: sinkTestConfigAttribute.Name, Config: types.Metadata{ - "test": "config", + "opentelemetry": "enabled", "remote_host": "https://orb.community/", "username": "dbuser", }, Description: sinkTestConfigAttribute.Description, Tags: sinkTestConfigAttribute.Tags, diff --git a/ui/src/app/pages/fleet/agents/key/agent.key.component.ts b/ui/src/app/pages/fleet/agents/key/agent.key.component.ts index 087b695b1..9c32d41c6 100644 --- a/ui/src/app/pages/fleet/agents/key/agent.key.component.ts +++ b/ui/src/app/pages/fleet/agents/key/agent.key.component.ts @@ -43,6 +43,7 @@ export class AgentKeyComponent implements OnInit { -e ORB_CLOUD_MQTT_CHANNEL_ID=${ this.agent.channel_id } \\ -e ORB_CLOUD_MQTT_KEY=${ this.agent.key } \\ -e PKTVISOR_PCAP_IFACE_DEFAULT=auto \\ +-e ORB_OTEL_ENABLE=true \\ ns1labs/orb-agent`; this.command2show = `docker run -d --restart=always --net=host \\ @@ -51,6 +52,7 @@ ns1labs/orb-agent`; -e ORB_CLOUD_MQTT_CHANNEL_ID=${ this.agent.channel_id } \\ -e ORB_CLOUD_MQTT_KEY=${ this.agent.key } \\ -e PKTVISOR_PCAP_IFACE_DEFAULT=auto \\ +-e ORB_OTEL_ENABLE=true \\ ns1labs/orb-agent`; } diff --git a/ui/src/app/pages/sinks/add/sink.add.component.html b/ui/src/app/pages/sinks/add/sink.add.component.html index 89fc7180e..55c6ed239 100644 --- a/ui/src/app/pages/sinks/add/sink.add.component.html +++ b/ui/src/app/pages/sinks/add/sink.add.component.html @@ -126,10 +126,8 @@

{{strings.sink[isEdit ? 'edit' : 'add']['header']}}

- - - * - + + * {{strings.sink[isEdit ? 'edit' : 'add']['header']}} [attr.data-orb-qa-id]="type" [value]="type">{{ type }} - - {{control.label}} -
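The sinks service hunks earlier in this patch validate remote_host with url.ParseRequestURI and merge an "opentelemetry": "enabled" default into every new sink's config, which is also why the add-sink form above no longer renders an "Enable OpenTelemetry" checkbox. A minimal sketch of both steps, assuming the config is a plain map; the Metadata type and its Merge semantics here are illustrative stand-ins for types.Metadata:

```go
package main

import (
	"errors"
	"fmt"
	"net/url"
)

// Metadata stands in for types.Metadata; this Merge only fills in missing keys,
// which is an assumption about how defaults are layered onto user config.
type Metadata map[string]interface{}

func (m Metadata) Merge(defaults Metadata) {
	for k, v := range defaults {
		if _, ok := m[k]; !ok {
			m[k] = v
		}
	}
}

// validateRemoteHost mirrors the ParseRequestURI check added in sinks_service.go.
func validateRemoteHost(config Metadata) error {
	raw, ok := config["remote_host"].(string)
	if !ok {
		return errors.New("remote_host is missing or not a string")
	}
	if _, err := url.ParseRequestURI(raw); err != nil {
		return fmt.Errorf("invalid remote url: %w", err)
	}
	return nil
}

func main() {
	config := Metadata{"remote_host": "https://orb.community/", "username": "dbuser"}

	if err := validateRemoteHost(config); err != nil {
		panic(err)
	}
	// New sinks are assumed to default to OpenTelemetry, matching the
	// defaultMetadata["opentelemetry"] = "enabled" merge in CreateSink.
	config.Merge(Metadata{"opentelemetry": "enabled"})

	fmt.Println(config) // map[opentelemetry:enabled remote_host:https://orb.community/ username:dbuser]
}
```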
diff --git a/ui/src/app/pages/sinks/add/sink.add.component.ts b/ui/src/app/pages/sinks/add/sink.add.component.ts index eaf453fc0..985302196 100644 --- a/ui/src/app/pages/sinks/add/sink.add.component.ts +++ b/ui/src/app/pages/sinks/add/sink.add.component.ts @@ -63,13 +63,6 @@ export class SinkAddComponent { input: entry.input, required: entry.required, })); - accumulator[curr].push({ - type: 'checkbox', - label: 'Enable OpenTelemetry', - prop: 'opentelemetry', - input: 'checkbox', - required: false, - }); return accumulator; }, {}); diff --git a/ui/src/app/shared/components/orb/agent/agent-backends/agent-backends.component.scss b/ui/src/app/shared/components/orb/agent/agent-backends/agent-backends.component.scss index c23878868..bfac59163 100644 --- a/ui/src/app/shared/components/orb/agent/agent-backends/agent-backends.component.scss +++ b/ui/src/app/shared/components/orb/agent/agent-backends/agent-backends.component.scss @@ -7,6 +7,10 @@ h4 { margin-bottom: 1.5rem; } +nb-tab { + padding: 1rem 0.5rem !important; +} + nb-card { border: transparent; border-radius: 0.5rem; diff --git a/ui/src/app/shared/components/orb/agent/agent-provisioning/agent-provisioning.component.ts b/ui/src/app/shared/components/orb/agent/agent-provisioning/agent-provisioning.component.ts index aa8a912c5..dfa9fb41c 100644 --- a/ui/src/app/shared/components/orb/agent/agent-provisioning/agent-provisioning.component.ts +++ b/ui/src/app/shared/components/orb/agent/agent-provisioning/agent-provisioning.component.ts @@ -50,6 +50,7 @@ export class AgentProvisioningComponent implements OnInit { -e ORB_CLOUD_MQTT_CHANNEL_ID=${ this.agent?.channel_id } \\ -e ORB_CLOUD_MQTT_KEY="AGENT_KEY" \\ -e PKTVISOR_PCAP_IFACE_DEFAULT=auto \\ +-e ORB_OTEL_ENABLE=true \\ ns1labs/orb-agent`; this.command2show = `docker run -d --restart=always --net=host \\ @@ -58,6 +59,7 @@ ns1labs/orb-agent`; -e ORB_CLOUD_MQTT_CHANNEL_ID=${ this.agent?.channel_id } \\ -e ORB_CLOUD_MQTT_KEY=AGENT_KEY \\ -e PKTVISOR_PCAP_IFACE_DEFAULT=auto \\ +-e ORB_OTEL_ENABLE=true \\ ns1labs/orb-agent`; } } diff --git a/ui/src/app/shared/components/orb/policy/policy-datasets/policy-datasets.component.scss b/ui/src/app/shared/components/orb/policy/policy-datasets/policy-datasets.component.scss index 92d4bf4ed..b5818fb13 100644 --- a/ui/src/app/shared/components/orb/policy/policy-datasets/policy-datasets.component.scss +++ b/ui/src/app/shared/components/orb/policy/policy-datasets/policy-datasets.component.scss @@ -52,6 +52,7 @@ nb-card { white-space: nowrap; text-overflow: ellipsis; width: 100%; + text-align: start; } } } diff --git a/ui/src/app/shared/components/orb/policy/policy-datasets/policy-datasets.component.ts b/ui/src/app/shared/components/orb/policy/policy-datasets/policy-datasets.component.ts index cba78de80..31e1c6d84 100644 --- a/ui/src/app/shared/components/orb/policy/policy-datasets/policy-datasets.component.ts +++ b/ui/src/app/shared/components/orb/policy/policy-datasets/policy-datasets.component.ts @@ -100,43 +100,34 @@ export class PolicyDatasetsComponent { prop: 'agent_group', name: 'Agent Group', - resizeable: false, + resizeable: true, canAutoResize: true, - minWidth: 200, - width: 250, - maxWidth: 300, - flexGrow: 2, + flexGrow: 1, cellTemplate: this.groupTemplateCell, }, { prop: 'valid', name: 'Valid', - resizeable: false, + resizeable: true, canAutoResize: true, - minWidth: 65, - width: 80, - maxWidth: 100, + minWidth: 80, flexGrow: 0, cellTemplate: this.validTemplateCell, }, { prop: 'sinks', name: 'Sinks', - resizeable: false, + resizeable: true, 
canAutoResize: true, - minWidth: 250, - width: 300, - maxWidth: 500, - flexGrow: 4, + flexGrow: 1, cellTemplate: this.sinksTemplateCell, }, { name: '', prop: 'actions', - minWidth: 100, - resizeable: false, + resizeable: true, sortable: false, - flexGrow: 0, + flexGrow: 1, cellTemplate: this.actionsTemplateCell, }, ];
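The sinker/redis/consumer/streams.go hunk earlier in this patch creates a separate consumer group, suffixed with ".otel", when OpenTelemetry is enabled, and acknowledges messages against that same group. A minimal sketch of that subscribe loop with go-redis v8; the stream, group, and consumer names are placeholders, and the Block and Count values are illustrative:

```go
package main

import (
	"context"
	"fmt"
	"strings"
	"time"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// Stream and group names are placeholders; the real values come from the
	// stream/group constants in sinker/redis/consumer/streams.go.
	stream := "orb.sinks"
	group := "orb.sinker"
	otelEnabled := true
	if otelEnabled {
		// A suffixed group lets the otel-enabled consumer keep its own offsets
		// and acknowledgements, separate from the legacy consumer group.
		group = group + ".otel"
	}

	// Create the consumer group (and the stream, if missing); a BUSYGROUP
	// error just means the group already exists.
	if err := client.XGroupCreateMkStream(ctx, stream, group, "$").Err(); err != nil &&
		!strings.Contains(err.Error(), "BUSYGROUP") {
		panic(err)
	}

	for {
		streams, err := client.XReadGroup(ctx, &redis.XReadGroupArgs{
			Group:    group,
			Consumer: "sinker.example", // illustrative consumer name
			Streams:  []string{stream, ">"},
			Count:    100,
			Block:    5 * time.Second,
		}).Result()
		if err == redis.Nil {
			continue // no new messages within the block window
		}
		if err != nil {
			panic(err)
		}
		for _, msg := range streams[0].Messages {
			fmt.Println("operation:", msg.Values["operation"])
			// Acknowledge against the same (possibly suffixed) group that read
			// the message, matching the XAck change in the hunk.
			client.XAck(ctx, stream, group, msg.ID)
		}
	}
}
```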