From bd587153f93d5d7f9bc98bd539061efb8ed88efb Mon Sep 17 00:00:00 2001 From: manrodrigues <78241475+manrodrigues@users.noreply.github.com> Date: Wed, 21 Sep 2022 10:49:16 -0300 Subject: [PATCH] dnstap and flow agents test (#1790) dnstap and flow agents test (#1790) --- python-test/features/integration.feature | 448 +++++++++++++++--- .../features/steps/agent_config_file.py | 148 +++--- .../features/steps/control_plane_agents.py | 69 ++- .../features/steps/control_plane_policies.py | 157 +++++- python-test/features/steps/taps.py | 62 +++ 5 files changed, 710 insertions(+), 174 deletions(-) create mode 100644 python-test/features/steps/taps.py diff --git a/python-test/features/integration.feature b/python-test/features/integration.feature index 7ea658a3c..346a52d0b 100644 --- a/python-test/features/integration.feature +++ b/python-test/features/integration.feature @@ -778,13 +778,90 @@ Scenario: remove one sink from a dataset with 1 sinks, edit the dataset and inse @smoke -Scenario: agent with only agent tags subscription to a group with policies created after provision the agent (config file - auto_provision=true) +Scenario: Remotely restart agents with policies applied Given the Orb user has a registered account And the Orb user logs in + And that an agent with 1 orb tag(s) already exists and is online + And referred agent is subscribed to 1 group + And this agent's heartbeat shows that 1 groups are matching the agent + And that a sink already exists + And 2 simple policies are applied to the group + And this agent's heartbeat shows that 2 policies are applied and all has status running + When remotely restart the agent + Then the container logs that were output after reset the agent contain the message "pktvisor process stopped" within 30 seconds + And the container logs should contain the message "all backends and comms were restarted" within 30 seconds + And the container logs that were output after reset the agent contain the message "removing policies" within 30 seconds + And the container logs that were output after reset the agent contain the message "resetting backend" within 30 seconds + And the container logs that were output after reset the agent contain the message "all backends and comms were restarted" within 30 seconds + And the container logs that were output after reset the agent contain the message "policy applied successfully" referred to each applied policy within 30 seconds + And the container logs that were output after reset the agent contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + +@smoke +Scenario: Remotely restart agents without policies applied + Given the Orb user has a registered account + And the Orb user logs in + And that an agent with 1 orb tag(s) already exists and is online + And referred agent is subscribed to 1 group + And this agent's heartbeat shows that 1 groups are matching the agent And that a sink already exists - When an agent is self-provisioned via a configuration file on port available with 3 agent tags and has status online + When remotely restart the agent + And the container logs that were output after reset the agent contain the message "resetting backend" within 30 seconds + And the container logs that were output after reset the agent contain the message "pktvisor process stopped" within 30 seconds + And the container logs that were output after reset the agent contain the message "all backends and comms were restarted" within 30 seconds + And 2 simple policies are applied to the 
group + Then the container logs should contain the message "restarting all backends" within 30 seconds + And this agent's heartbeat shows that 2 policies are applied and all has status running + And the container logs that were output after reset the agent contain the message "policy applied successfully" referred to each applied policy within 20 seconds + And the container logs that were output after reset the agent contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + + +@smoke +Scenario: Create duplicated policy + Given the Orb user has a registered account + And the Orb user logs in + And that an agent with 1 orb tag(s) already exists and is online + And referred agent is subscribed to 1 group + And this agent's heartbeat shows that 1 groups are matching the agent + And that a sink already exists + When 1 simple policies are applied to the group + And 1 duplicated policies is applied to the group + Then this agent's heartbeat shows that 2 policies are applied and all has status running + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And referred sink must have active state on response within 30 seconds + And 2 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + + +@smoke +Scenario: Remove agent (check dataset) + Given the Orb user has a registered account + And the Orb user logs in + And a new agent is created with 1 orb tag(s) + And the agent container is started on an available port + And the agent status is online + And referred agent is subscribed to 1 group + And that a sink already exists + And 2 simple policies are applied to the group + When this agent is removed + Then 0 agent must be matching on response field matching_agents of the last group created + And the container logs should contain the message "ERROR mqtt log" within 120 seconds + And the container logs should contain the message "error reconnecting with MQTT, stopping agent" within 120 seconds + And last container created is exited within 70 seconds + And the container logs should not contain any panic message + And last container created is exited after 120 seconds + And 2 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + +### AGENTS PROVISIONED USING CONFIGURATION FILES: +########### pcap + +@smoke +Scenario: agent pcap with only agent tags subscription to a group with policies created after provision the agent (config file - auto_provision=true) + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + When an agent(input_type:pcap, settings: {"iface":"default"}) is self-provisioned via a configuration file on port available with 3 agent tags and has status online And 1 Agent Group(s) is created with all tags contained in the agent - And 3 simple policies are applied to the group + And 3 simple policies with same tap as created via config file are applied to the group Then 3 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And the container logs should contain the message "completed RPC subscription to group" within 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent @@ -796,14 +873,14 @@ Scenario: agent with only agent tags subscription to a group with policies creat 
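Illustration of the new step syntax used in the pcap scenarios above: the `settings: {...}` fragment is captured as a JSON string by the step implementation added later in this diff and becomes the tap entry of the generated agent config file. A minimal sketch of that flow, assuming make_tap() (whose tail is truncated at the end of this patch) keys the tap by name and nests the parsed settings under "config"; the tap name below is hypothetical.

import json
import yaml

step_settings = '{"iface":"default"}'       # literal settings text captured from the Gherkin step
settings = json.loads(step_settings)        # provision_agent_using_config_file() parses it as JSON

iface = "mock"                              # configs.get('orb_agent_interface', 'mock') in the step code
if settings.get("iface") == "default":      # config_file_of_orb_agent() swaps "default" for the real interface
    settings["iface"] = iface

tap_name = "example_tap"                    # hypothetical; the real code derives it from the agent/tap context
tap = {tap_name: {"input_type": "pcap", "config": settings}}  # assumed shape returned by Taps.pcap()

print(yaml.dump({"visor": {"taps": tap}}))  # tap fragment of the .yaml config file written for the agent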
@smoke -Scenario: agent with only agent tags subscription to a group with policies created before provision the agent (config file - auto_provision=true) +Scenario: agent pcap with only agent tags subscription to a group with policies created before provision the agent (config file - auto_provision=true) Given the Orb user has a registered account And the Orb user logs in And that a sink already exists And 1 Agent Group(s) is created with 1 orb tag(s) (lower case) - And 3 simple policies are applied to the group + And 3 simple policies pcap are applied to the group And a new agent is created with 0 orb tag(s) - When an agent is self-provisioned via a configuration file on port available with matching 1 group agent tags and has status online + When an agent(input_type:pcap, settings: {"iface":"default"}) is self-provisioned via a configuration file on port available with matching 1 group agent tags and has status online Then 3 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And the container logs should contain the message "completed RPC subscription to group" within 30 seconds @@ -815,14 +892,14 @@ Scenario: agent with only agent tags subscription to a group with policies creat @smoke -Scenario: agent with mixed tags subscription to a group with policies created after provision the agent (config file - auto_provision=true) +Scenario: agent pcap with mixed tags subscription to a group with policies created after provision the agent (config file - auto_provision=true) Given the Orb user has a registered account And the Orb user logs in And that a sink already exists - When an agent is self-provisioned via a configuration file on port available with 3 agent tags and has status online + When an agent(input_type:pcap, settings: {"iface":"default"}) is self-provisioned via a configuration file on port available with 3 agent tags and has status online And edit the orb tags on agent and use 2 orb tag(s) And 1 Agent Group(s) is created with all tags contained in the agent - And 3 simple policies are applied to the group + And 3 simple policies with same tap as created via config file are applied to the group Then 3 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And the container logs should contain the message "completed RPC subscription to group" within 30 seconds @@ -834,14 +911,14 @@ Scenario: agent with mixed tags subscription to a group with policies created af @smoke -Scenario: agent with mixed tags subscription to a group with policies created before provision the agent (config file - auto_provision=true) +Scenario: agent pcap with mixed tags subscription to a group with policies created before provision the agent (config file - auto_provision=true) Given the Orb user has a registered account And the Orb user logs in And that a sink already exists And 1 Agent Group(s) is created with 2 orb tag(s) (lower case) - And 3 simple policies are applied to the group + And 3 simple policies pcap are applied to the group And a new agent is created with 2 orb tag(s) - When an agent is self-provisioned via a configuration file on port available with matching 1 group agent tags and has status online + When an agent(input_type:pcap, settings: {"iface":"default"}) is self-provisioned via a configuration file on port available with matching 1 group agent tags and has status online Then 3 dataset(s) have validity valid 
and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And the container logs should contain the message "completed RPC subscription to group" within 30 seconds @@ -852,14 +929,14 @@ Scenario: agent with mixed tags subscription to a group with policies created be And remove the agent .yaml generated on each scenario @smoke -Scenario: agent with only agent tags subscription to a group with policies created after provision the agent (config file - auto_provision=false) +Scenario: agent pcap with only agent tags subscription to a group with policies created after provision the agent (config file - auto_provision=false) Given the Orb user has a registered account And the Orb user logs in And that a sink already exists And a new agent is created with 0 orb tag(s) - When an agent is provisioned via a configuration file on port available with 3 agent tags and has status online + When an agent(input_type:pcap, settings: {"iface":"default"}) is provisioned via a configuration file on port available with 3 agent tags and has status online And 1 Agent Group(s) is created with all tags contained in the agent - And 3 simple policies are applied to the group + And 3 simple policies with same tap as created via config file are applied to the group Then 3 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And the container logs should contain the message "completed RPC subscription to group" within 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent @@ -871,14 +948,14 @@ Scenario: agent with only agent tags subscription to a group with policies creat #@smoke @MUTE -Scenario: agent with only agent tags subscription to a group with policies created before provision the agent (config file - auto_provision=false) +Scenario: agent pcap with only agent tags subscription to a group with policies created before provision the agent (config file - auto_provision=false) Given the Orb user has a registered account And the Orb user logs in And that a sink already exists And 1 Agent Group(s) is created with 1 orb tag(s) (lower case) - And 3 simple policies are applied to the group + And 3 simple policies pcap are applied to the group And a new agent is created with 0 orb tag(s) - When an agent is provisioned via a configuration file on port available with matching 1 group agent tags and has status online + When an agent(input_type:pcap, settings: {"iface":"default"}) is provisioned via a configuration file on port available with matching 1 group agent tags and has status online Then 3 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And the container logs should contain the message "completed RPC subscription to group" within 30 seconds @@ -889,15 +966,15 @@ Scenario: agent with only agent tags subscription to a group with policies creat And remove the agent .yaml generated on each scenario @smoke -Scenario: agent with mixed tags subscription to a group with policies created after provision the agent (config file - auto_provision=false) +Scenario: agent pcap with mixed tags subscription to a group with policies created after provision the agent (config file - auto_provision=false) Given the Orb user has a registered account And the Orb user logs in And that a sink already exists And a new agent is created with 2 orb tag(s) - When an agent is provisioned via a configuration file on port available with 3 agent tags and has 
status online + When an agent(input_type:pcap, settings: {"iface":"default"}) is provisioned via a configuration file on port available with 3 agent tags and has status online And edit the orb tags on agent and use 2 orb tag(s) And 1 Agent Group(s) is created with all tags contained in the agent - And 3 simple policies are applied to the group + And 3 simple policies with same tap as created via config file are applied to the group Then 3 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And the container logs should contain the message "completed RPC subscription to group" within 30 seconds @@ -909,14 +986,14 @@ Scenario: agent with mixed tags subscription to a group with policies created af #@smoke @MUTE -Scenario: agent with mixed tags subscription to a group with policies created before provision the agent (config file - auto_provision=false) +Scenario: agent pcap with mixed tags subscription to a group with policies created before provision the agent (config file - auto_provision=false) Given the Orb user has a registered account And the Orb user logs in And that a sink already exists And 1 Agent Group(s) is created with 2 orb tag(s) (lower case) - And 3 simple policies are applied to the group + And 3 simple policies pcap are applied to the group And a new agent is created with 2 orb tag(s) - When an agent is provisioned via a configuration file on port available with matching 1 group agent tags and has status online + When an agent(input_type:pcap, settings: {"iface":"default"}) is provisioned via a configuration file on port available with matching 1 group agent tags and has status online Then 3 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And the container logs should contain the message "completed RPC subscription to group" within 30 seconds @@ -926,76 +1003,307 @@ Scenario: agent with mixed tags subscription to a group with policies created be And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And remove the agent .yaml generated on each scenario + +########### flow + @smoke -Scenario: Remotely restart agents with policies applied +Scenario: agent flow with only agent tags subscription to a group with policies created after provision the agent (config file - auto_provision=true) Given the Orb user has a registered account And the Orb user logs in - And that an agent with 1 orb tag(s) already exists and is online - And referred agent is subscribed to 1 group + And that a sink already exists + When an agent(input_type:flow, settings: {"bind":"0.0.0.0", "port":"available_port"}) is self-provisioned via a configuration file on port available with 3 agent tags and has status online + And 1 Agent Group(s) is created with all tags contained in the agent + And 3 simple policies with same tap as created via config file are applied to the group + Then 3 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And the container logs should contain the message "completed RPC subscription to group" within 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 3 policies are applied and all has status running + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied 
policy within 180 seconds + And referred sink must have active state on response within 30 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And remove the agent .yaml generated on each scenario + + +@smoke +Scenario: agent flow with only agent tags subscription to a group with policies created before provision the agent (config file - auto_provision=true) + Given the Orb user has a registered account + And the Orb user logs in And that a sink already exists - And 2 simple policies are applied to the group - And this agent's heartbeat shows that 2 policies are applied and all has status running - When remotely restart the agent - Then the container logs that were output after reset the agent contain the message "pktvisor process stopped" within 30 seconds - And the container logs should contain the message "all backends and comms were restarted" within 30 seconds - And the container logs that were output after reset the agent contain the message "removing policies" within 30 seconds - And the container logs that were output after reset the agent contain the message "resetting backend" within 30 seconds - And the container logs that were output after reset the agent contain the message "all backends and comms were restarted" within 30 seconds - And the container logs that were output after reset the agent contain the message "policy applied successfully" referred to each applied policy within 30 seconds - And the container logs that were output after reset the agent contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And 1 Agent Group(s) is created with 1 orb tag(s) (lower case) + And 3 simple policies flow are applied to the group + And a new agent is created with 0 orb tag(s) + When an agent(input_type:flow, settings: {"bind":"0.0.0.0", "port":"available_port"}) is self-provisioned via a configuration file on port available with matching 1 group agent tags and has status online + Then 3 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And the container logs should contain the message "completed RPC subscription to group" within 30 seconds + And this agent's heartbeat shows that 3 policies are applied and all has status running + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And referred sink must have active state on response within 30 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And remove the agent .yaml generated on each scenario + @smoke -Scenario: Remotely restart agents without policies applied +Scenario: agent flow with mixed tags subscription to a group with policies created after provision the agent (config file - auto_provision=true) Given the Orb user has a registered account And the Orb user logs in - And that an agent with 1 orb tag(s) already exists and is online - And referred agent is subscribed to 1 group + And that a sink already exists + When an agent(input_type:flow, settings: {"bind":"0.0.0.0", "port":"available_port"}) is self-provisioned via a configuration file on port available with 3 agent tags and has status online + And edit the orb tags on agent and use 2 orb tag(s) + And 1 Agent Group(s) is created with all tags contained in 
the agent + And 3 simple policies with same tap as created via config file are applied to the group + Then 3 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent + And the container logs should contain the message "completed RPC subscription to group" within 30 seconds + And this agent's heartbeat shows that 3 policies are applied and all has status running + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And referred sink must have active state on response within 30 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And remove the agent .yaml generated on each scenario + + +@smoke +Scenario: agent flow with mixed tags subscription to a group with policies created before provision the agent (config file - auto_provision=true) + Given the Orb user has a registered account + And the Orb user logs in And that a sink already exists - When remotely restart the agent - And the container logs that were output after reset the agent contain the message "resetting backend" within 30 seconds - And the container logs that were output after reset the agent contain the message "pktvisor process stopped" within 30 seconds - And the container logs that were output after reset the agent contain the message "all backends and comms were restarted" within 30 seconds - And 2 simple policies are applied to the group - Then the container logs should contain the message "restarting all backends" within 30 seconds - And this agent's heartbeat shows that 2 policies are applied and all has status running - And the container logs that were output after reset the agent contain the message "policy applied successfully" referred to each applied policy within 20 seconds - And the container logs that were output after reset the agent contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And 1 Agent Group(s) is created with 2 orb tag(s) (lower case) + And 3 simple policies flow are applied to the group + And a new agent is created with 2 orb tag(s) + When an agent(input_type:flow, settings: {"bind":"0.0.0.0", "port":"available_port"}) is self-provisioned via a configuration file on port available with matching 1 group agent tags and has status online + Then 3 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And the container logs should contain the message "completed RPC subscription to group" within 30 seconds + And this agent's heartbeat shows that 3 policies are applied and all has status running + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And referred sink must have active state on response within 30 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And remove the agent .yaml generated on each scenario + +@smoke +Scenario: agent flow with only agent tags subscription to a group with policies created after provision the agent (config file - auto_provision=false) + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a 
new agent is created with 0 orb tag(s) + When an agent(input_type:flow, settings: {"bind":"0.0.0.0", "port":"available_port"}) is provisioned via a configuration file on port available with 3 agent tags and has status online + And 1 Agent Group(s) is created with all tags contained in the agent + And 3 simple policies with same tap as created via config file are applied to the group + Then 3 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And the container logs should contain the message "completed RPC subscription to group" within 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 3 policies are applied and all has status running + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And referred sink must have active state on response within 30 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And remove the agent .yaml generated on each scenario + +#@smoke +@MUTE +Scenario: agent flow with only agent tags subscription to a group with policies created before provision the agent (config file - auto_provision=false) + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And 1 Agent Group(s) is created with 1 orb tag(s) (lower case) + And 3 simple policies flow are applied to the group + And a new agent is created with 0 orb tag(s) + When an agent(input_type:flow, settings: {"bind":"0.0.0.0", "port":"available_port"}) is provisioned via a configuration file on port available with matching 1 group agent tags and has status online + Then 3 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And the container logs should contain the message "completed RPC subscription to group" within 30 seconds + And this agent's heartbeat shows that 3 policies are applied and all has status running + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And referred sink must have active state on response within 30 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And remove the agent .yaml generated on each scenario + +@smoke +Scenario: agent flow with mixed tags subscription to a group with policies created after provision the agent (config file - auto_provision=false) + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a new agent is created with 2 orb tag(s) + When an agent(input_type:flow, settings: {"bind":"0.0.0.0", "port":"available_port"}) is provisioned via a configuration file on port available with 3 agent tags and has status online + And edit the orb tags on agent and use 2 orb tag(s) + And 1 Agent Group(s) is created with all tags contained in the agent + And 3 simple policies with same tap as created via config file are applied to the group + Then 3 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And the container logs should contain the message "completed RPC subscription to group" within 30 
seconds + And this agent's heartbeat shows that 3 policies are applied and all has status running + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And referred sink must have active state on response within 30 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And remove the agent .yaml generated on each scenario + +#@smoke +@MUTE +Scenario: agent flow with mixed tags subscription to a group with policies created before provision the agent (config file - auto_provision=false) + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And 1 Agent Group(s) is created with 2 orb tag(s) (lower case) + And 3 simple policies flow are applied to the group + And a new agent is created with 2 orb tag(s) + When an agent(input_type:flow, settings: {"bind":"0.0.0.0", "port":"available_port"}) is provisioned via a configuration file on port available with matching 1 group agent tags and has status online + Then 3 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And the container logs should contain the message "completed RPC subscription to group" within 30 seconds + And this agent's heartbeat shows that 3 policies are applied and all has status running + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And referred sink must have active state on response within 30 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And remove the agent .yaml generated on each scenario + +########### dnstap + @smoke -Scenario: Create duplicated policy +Scenario: agent dnstap with only agent tags subscription to a group with policies created after provision the agent (config file - auto_provision=true) Given the Orb user has a registered account And the Orb user logs in - And that an agent with 1 orb tag(s) already exists and is online - And referred agent is subscribed to 1 group + And that a sink already exists + When an agent(input_type:dnstap, settings: {"tcp":"0.0.0.0:available_port", "only_hosts":"0.0.0.0/32"}) is self-provisioned via a configuration file on port available with 3 agent tags and has status online + And 1 Agent Group(s) is created with all tags contained in the agent + And 3 simple policies with same tap as created via config file are applied to the group + Then 3 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And the container logs should contain the message "completed RPC subscription to group" within 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 3 policies are applied and all has status running + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And referred sink must have active state on response within 30 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And remove the agent .yaml generated on each scenario + + +@smoke +Scenario: agent dnstap 
with only agent tags subscription to a group with policies created before provision the agent (config file - auto_provision=true) + Given the Orb user has a registered account + And the Orb user logs in And that a sink already exists - When 1 simple policies are applied to the group - And 1 duplicated policies is applied to the group - Then this agent's heartbeat shows that 2 policies are applied and all has status running + And 1 Agent Group(s) is created with 1 orb tag(s) (lower case) + And 3 simple policies dnstap are applied to the group + And a new agent is created with 0 orb tag(s) + When an agent(input_type:dnstap, settings: {"tcp":"0.0.0.0:available_port", "only_hosts":"0.0.0.0/32"}) is self-provisioned via a configuration file on port available with matching 1 group agent tags and has status online + Then 3 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And the container logs should contain the message "completed RPC subscription to group" within 30 seconds + And this agent's heartbeat shows that 3 policies are applied and all has status running + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And referred sink must have active state on response within 30 seconds And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And remove the agent .yaml generated on each scenario + + +@smoke +Scenario: agent dnstap with mixed tags subscription to a group with policies created after provision the agent (config file - auto_provision=true) + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + When an agent(input_type:dnstap, settings: {"tcp":"0.0.0.0:available_port", "only_hosts":"0.0.0.0/32"}) is self-provisioned via a configuration file on port available with 3 agent tags and has status online + And edit the orb tags on agent and use 2 orb tag(s) + And 1 Agent Group(s) is created with all tags contained in the agent + And 3 simple policies with same tap as created via config file are applied to the group + Then 3 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And the container logs should contain the message "completed RPC subscription to group" within 30 seconds + And this agent's heartbeat shows that 3 policies are applied and all has status running And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds And referred sink must have active state on response within 30 seconds - And 2 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And remove the agent .yaml generated on each scenario @smoke -Scenario: Remove agent (check dataset) +Scenario: agent dnstap with mixed tags subscription to a group with policies created before provision the agent (config file - auto_provision=true) Given the Orb user has a registered account And the Orb user logs in - And a new agent is created with 1 orb tag(s) - And the agent container is started on an available port - And the agent status is online - And 
referred agent is subscribed to 1 group And that a sink already exists - And 2 simple policies are applied to the group - When this agent is removed - Then 0 agent must be matching on response field matching_agents of the last group created - And the container logs should contain the message "ERROR mqtt log" within 120 seconds - And the container logs should contain the message "error reconnecting with MQTT, stopping agent" within 120 seconds - And last container created is exited within 70 seconds - And the container logs should not contain any panic message - And last container created is exited after 120 seconds - And 2 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And 1 Agent Group(s) is created with 2 orb tag(s) (lower case) + And 3 simple policies dnstap are applied to the group + And a new agent is created with 2 orb tag(s) + When an agent(input_type:dnstap, settings: {"tcp":"0.0.0.0:available_port", "only_hosts":"0.0.0.0/32"}) is self-provisioned via a configuration file on port available with matching 1 group agent tags and has status online + Then 3 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And the container logs should contain the message "completed RPC subscription to group" within 30 seconds + And this agent's heartbeat shows that 3 policies are applied and all has status running + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And referred sink must have active state on response within 30 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And remove the agent .yaml generated on each scenario + +@smoke +Scenario: agent dnstap with only agent tags subscription to a group with policies created after provision the agent (config file - auto_provision=false) + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a new agent is created with 0 orb tag(s) + When an agent(input_type:dnstap, settings: {"tcp":"0.0.0.0:available_port", "only_hosts":"0.0.0.0/32"}) is provisioned via a configuration file on port available with 3 agent tags and has status online + And 1 Agent Group(s) is created with all tags contained in the agent + And 3 simple policies with same tap as created via config file are applied to the group + Then 3 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And the container logs should contain the message "completed RPC subscription to group" within 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 3 policies are applied and all has status running + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And referred sink must have active state on response within 30 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And remove the agent .yaml generated on each scenario + +#@smoke +@MUTE +Scenario: agent dnstap with only agent tags subscription to a group with policies created before provision the agent (config file - auto_provision=false) + Given the Orb user has a registered account + 
And the Orb user logs in + And that a sink already exists + And 1 Agent Group(s) is created with 1 orb tag(s) (lower case) + And 3 simple policies dnstap are applied to the group + And a new agent is created with 0 orb tag(s) + When an agent(input_type:dnstap, settings: {"tcp":"0.0.0.0:available_port", "only_hosts":"0.0.0.0/32"}) is provisioned via a configuration file on port available with matching 1 group agent tags and has status online + Then 3 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And the container logs should contain the message "completed RPC subscription to group" within 30 seconds + And this agent's heartbeat shows that 3 policies are applied and all has status running + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And referred sink must have active state on response within 30 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And remove the agent .yaml generated on each scenario + +@smoke +Scenario: agent dnstap with mixed tags subscription to a group with policies created after provision the agent (config file - auto_provision=false) + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a new agent is created with 2 orb tag(s) + When an agent(input_type:dnstap, settings: {"tcp":"0.0.0.0:available_port", "only_hosts":"0.0.0.0/32"}) is provisioned via a configuration file on port available with 3 agent tags and has status online + And edit the orb tags on agent and use 2 orb tag(s) + And 1 Agent Group(s) is created with all tags contained in the agent + And 3 simple policies with same tap as created via config file are applied to the group + Then 3 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And the container logs should contain the message "completed RPC subscription to group" within 30 seconds + And this agent's heartbeat shows that 3 policies are applied and all has status running + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And referred sink must have active state on response within 30 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And remove the agent .yaml generated on each scenario + +#@smoke +@MUTE +Scenario: agent dnstap with mixed tags subscription to a group with policies created before provision the agent (config file - auto_provision=false) + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And 1 Agent Group(s) is created with 2 orb tag(s) (lower case) + And 3 simple policies dnstap are applied to the group + And a new agent is created with 2 orb tag(s) + When an agent(input_type:dnstap, settings: {"tcp":"0.0.0.0:available_port", "only_hosts":"0.0.0.0/32"}) is provisioned via a configuration file on port available with matching 1 group agent tags and has status online + Then 3 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And 
the container logs should contain the message "completed RPC subscription to group" within 30 seconds + And this agent's heartbeat shows that 3 policies are applied and all has status running + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And referred sink must have active state on response within 30 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And remove the agent .yaml generated on each scenario diff --git a/python-test/features/steps/agent_config_file.py b/python-test/features/steps/agent_config_file.py index 29f6f27d9..6e2dc060b 100644 --- a/python-test/features/steps/agent_config_file.py +++ b/python-test/features/steps/agent_config_file.py @@ -1,4 +1,7 @@ -from hamcrest import * +import yaml +from utils import return_port_to_run_docker_container + +from taps import * class FleetAgent: @@ -6,75 +9,92 @@ def __init__(self): pass @classmethod - def config_file_of_agent_tap_pcap(cls, name, token, iface, orb_url, base_orb_mqtt, tls_verify="true", - auto_provision="true", orb_cloud_mqtt_id=None, orb_cloud_mqtt_key=None, - orb_cloud_mqtt_channel_id=None): + def config_file_of_orb_agent(cls, name, token, iface, orb_url, base_orb_mqtt, tap_name, tls_verify="true", + auto_provision="true", orb_cloud_mqtt_id=None, orb_cloud_mqtt_key=None, + orb_cloud_mqtt_channel_id=None, input_type="pcap", settings=None): assert_that(tls_verify, any_of(equal_to("true"), equal_to("false")), "Unexpected value for tls_verify on " "agent pcap config file creation") assert_that(auto_provision, any_of(equal_to("true"), equal_to("false")), "Unexpected value for auto_provision " "on agent pcap config file creation") + assert_that(input_type, any_of(equal_to("pcap"), equal_to("flow"), equal_to("dnstap")), + "Unexpect type of input type.") + if "iface" in settings.keys() and settings["iface"] == "default": + settings['iface'] = iface + if input_type == "pcap": + tap = Taps.pcap(tap_name, input_type, settings) + elif input_type == "flow": + tap = Taps.flow(tap_name, input_type, settings) + else: + tap = Taps.dnstap(tap_name, input_type, settings) if auto_provision == "true": - agent_tap_pcap = f""" - version: "1.0" - - visor: - taps: - default_pcap: - input_type: pcap - config: - iface: {iface} - host_spec: "192.168.0.54/32,192.168.0.55/32,127.0.0.1/32" - orb: - backends: - pktvisor: - binary: "/usr/local/sbin/pktvisord" - config_file: /usr/local/orb/{name}.yaml - tls: - verify: {tls_verify} - cloud: - config: - agent_name: {name} - auto_provision: {auto_provision} - - api: - address: {orb_url} - token: {token} - mqtt: - address: {base_orb_mqtt} - - """ + agent = { + "version": "1.0", + "visor": { + "taps": tap + }, + "orb": { + "backends": { + "pktvisor": { + "binary": "/usr/local/sbin/pktvisord", + "config_file": f"/usr/local/orb/{name}.yaml" + } + }, + "tls": { + "verify": { + "tls_verify": tls_verify + } + }, + "cloud": { + "config": { + "auto_provision": auto_provision, + "agent_name": name + }, + "api": { + "address": orb_url, + "token": token + }, + "mqtt": { + "address": base_orb_mqtt + } + } + } + } else: assert_that(orb_cloud_mqtt_id, not_(is_(None)), "orb_cloud_mqtt_id must have a valid value") assert_that(orb_cloud_mqtt_channel_id, not_(is_(None)), "orb_cloud_mqtt_channel_id must have a valid value") assert_that(orb_cloud_mqtt_key, not_(is_(None)), "orb_cloud_mqtt_key must have a valid value") - 
agent_tap_pcap = f""" - version: "1.0" - - visor: - taps: - default_pcap: - input_type: pcap - config: - iface: {iface} - host_spec: "192.168.0.54/32,192.168.0.55/32,127.0.0.1/32" - orb: - backends: - pktvisor: - binary: "/usr/local/sbin/pktvisord" - config_file: /usr/local/orb/{name}.yaml - tls: - verify: {tls_verify} - cloud: - config: - auto_provision: {auto_provision} - - api: - address: {orb_url} - mqtt: - address: {base_orb_mqtt} - id: {orb_cloud_mqtt_id} - key: {orb_cloud_mqtt_key} - channel_id: {orb_cloud_mqtt_channel_id} - - """ - return agent_tap_pcap + agent = { + "version": "1.0", + "visor": { + "taps": tap + }, + "orb": { + "backends": { + "pktvisor": { + "binary": "/usr/local/sbin/pktvisord", + "config_file": f"/usr/local/orb/{name}.yaml" + } + }, + "tls": { + "verify": { + "tls_verify": tls_verify + } + }, + "cloud": { + "config": { + "auto_provision": auto_provision + }, + "api": { + "address": orb_url + }, + "mqtt": { + "address": base_orb_mqtt, + "id": orb_cloud_mqtt_id, + "key": orb_cloud_mqtt_key, + "channel_id": orb_cloud_mqtt_channel_id + } + } + } + } + agent = yaml.dump(agent) + return agent, tap diff --git a/python-test/features/steps/control_plane_agents.py b/python-test/features/steps/control_plane_agents.py index 7a16e9fba..78bb6d9ed 100644 --- a/python-test/features/steps/control_plane_agents.py +++ b/python-test/features/steps/control_plane_agents.py @@ -232,10 +232,20 @@ def check_agent_exists_on_backend(token, agent_name, event=None): return agent, event.is_set() -@step("an agent is {provision} via a configuration file on port {port} with {agent_tags} agent tags and has " - "status {status}") -def provision_agent_using_config_file(context, provision, port, agent_tags, status): - assert_that(provision, any_of(equal_to("self-provisioned"), equal_to("provisioned")), "Unexpected provision attribute") +@step("an agent(input_type:{input_type}, settings: {settings}) is {provision} via a configuration file on port {port} " + "with {agent_tags} agent tags and has status {status}") +def provision_agent_using_config_file(context, input_type, settings, provision, port, agent_tags, status): + assert_that(provision, any_of(equal_to("self-provisioned"), equal_to("provisioned")), "Unexpected provision " + "attribute") + settings = json.loads(settings) + if ("tcp" in settings.keys() and settings["tcp"].split(":")[1] == "available_port") or ("port" in settings.keys() and settings["port"] == "available_port"): + port_to_attach = return_port_to_run_docker_container(context) + if "tcp" in settings.keys(): + ip = settings["tcp"].split(":")[0] + tcp = f"{ip}:{port_to_attach}" + settings["tcp"] = tcp + else: + settings["port"] = port_to_attach if provision == "provisioned": auto_provision = "false" orb_cloud_mqtt_id = context.agent['id'] @@ -248,15 +258,24 @@ def provision_agent_using_config_file(context, provision, port, agent_tags, stat orb_cloud_mqtt_key = None orb_cloud_mqtt_channel_id = None agent_name = f"{agent_name_prefix}{random_string(10)}" + interface = configs.get('orb_agent_interface', 'mock') orb_url = configs.get('orb_url') base_orb_address = configs.get('orb_address') port = return_port_to_run_docker_container(context, True) - context.agent_file_name, tags_on_agent = create_agent_config_file(context.token, agent_name, interface, - agent_tags, orb_url, base_orb_address, port, - context.agent_groups, auto_provision, - orb_cloud_mqtt_id, - orb_cloud_mqtt_key, orb_cloud_mqtt_channel_id) + if "tap_name" in context: + tap_name = context.tap_name + else: + tap_name = 
agent_name + context.agent_file_name, tags_on_agent, context.tap = create_agent_config_file(context.token, agent_name, interface, + agent_tags, orb_url, + base_orb_address, port, + context.agent_groups, tap_name, + input_type, auto_provision, + orb_cloud_mqtt_id, + orb_cloud_mqtt_key, + orb_cloud_mqtt_channel_id, + settings) context.container_id = run_agent_config_file(agent_name) if context.container_id not in context.containers_id.keys(): context.containers_id[context.container_id] = str(port) @@ -266,14 +285,18 @@ def provision_agent_using_config_file(context, provision, port, agent_tags, stat f"Logs:{logs}") context.agent, is_agent_created = check_agent_exists_on_backend(context.token, agent_name, timeout=10) context.agent, are_tags_correct = get_agent_tags(context.token, context.agent['id'], tags_on_agent) - assert_that(are_tags_correct, equal_to(True), f"Agent tags created does not match with the required ones. Agent:" - f"{context.agent}. Tags that would be present: {tags_on_agent}") assert_that(is_agent_created, equal_to(True), f"Agent {agent_name} not found. Logs: {logs}") + assert_that(are_tags_correct, equal_to(True), f"Agent tags created does not match with the required ones. Agent:" + f"{context.agent}. Tags that would be present: {tags_on_agent}.\n" + f"Agent Logs: {logs}") assert_that(context.agent, is_not(None), f"Agent {agent_name} not correctly created. Logs: {logs}") agent_id = context.agent['id'] existing_agents = get_agent(context.token, agent_id) assert_that(len(existing_agents), greater_than(0), f"Agent not created. Logs: {logs}") - wait_until_expected_agent_status(context.token, agent_id, status) + agent_status, context.agent = wait_until_expected_agent_status(context.token, agent_id, status) + assert_that(agent_status, is_(equal_to(status)), + f"Agent did not get '{status}' after 30 seconds, but was '{agent_status}'. \n" + f"Agent: {json.dumps(context.agent, indent=4)}. \n Logs: {logs}") @step("remotely restart the agent") @@ -504,8 +527,9 @@ def get_groups_to_which_agent_is_matching(token, agent_id, groups_matching_ids, def create_agent_config_file(token, agent_name, iface, agent_tags, orb_url, base_orb_address, port, - existing_agent_groups, auto_provision="true", orb_cloud_mqtt_id=None, - orb_cloud_mqtt_key=None, orb_cloud_mqtt_channel_id=None): + existing_agent_groups, tap_name, input_type="pcap", auto_provision="true", + orb_cloud_mqtt_id=None, orb_cloud_mqtt_key=None, orb_cloud_mqtt_channel_id=None, + settings=None): """ Create a file .yaml with configs of the agent that will be provisioned @@ -517,10 +541,13 @@ def create_agent_config_file(token, agent_name, iface, agent_tags, orb_url, base :param (str) base_orb_address: base orb url address :param (str) port: port on which agent must run. :param (dict) existing_agent_groups: all agent groups available + :param (str) tap_name: name of the input tap + :param (str) input_type: type of tap on agent. Default: pcap :param (str) auto_provision: if true auto_provision the agent. If false, provision an agent already existent on orb :param (str) orb_cloud_mqtt_id: agent mqtt id. :param (str) orb_cloud_mqtt_key: agent mqtt key. :param (str) orb_cloud_mqtt_channel_id: agent mqtt channel id. 
+ :param (str) settings: settings of input :return: path to the directory where the agent config file was created """ assert_that(auto_provision, any_of(equal_to("true"), equal_to("false")), "Unexpected value for auto_provision " @@ -534,18 +561,22 @@ def create_agent_config_file(token, agent_name, iface, agent_tags, orb_url, base tags = {"tags": create_tags_set(agent_tags)} if configs.get('ignore_ssl_and_certificate_errors', 'true').lower() == 'true': mqtt_url = f"{base_orb_address}:1883" - agent_config_file = FleetAgent.config_file_of_agent_tap_pcap(agent_name, token, iface, orb_url, mqtt_url, + agent_config_file, tap = FleetAgent.config_file_of_orb_agent(agent_name, token, iface, orb_url, mqtt_url, tap_name, tls_verify="false", auto_provision=auto_provision, orb_cloud_mqtt_id=orb_cloud_mqtt_id, orb_cloud_mqtt_key=orb_cloud_mqtt_key, - orb_cloud_mqtt_channel_id=orb_cloud_mqtt_channel_id) + orb_cloud_mqtt_channel_id=orb_cloud_mqtt_channel_id, + input_type=input_type, + settings=settings) else: mqtt_url = "tls://" + base_orb_address + ":8883" - agent_config_file = FleetAgent.config_file_of_agent_tap_pcap(agent_name, token, iface, orb_url, mqtt_url, + agent_config_file, tap = FleetAgent.config_file_of_orb_agent(agent_name, token, iface, orb_url, mqtt_url, tap_name, auto_provision=auto_provision, orb_cloud_mqtt_id=orb_cloud_mqtt_id, orb_cloud_mqtt_key=orb_cloud_mqtt_key, - orb_cloud_mqtt_channel_id=orb_cloud_mqtt_channel_id) + orb_cloud_mqtt_channel_id=orb_cloud_mqtt_channel_id, + input_type=input_type, + settings=settings) agent_config_file = yaml.load(agent_config_file, Loader=SafeLoader) agent_config_file['orb'].update(tags) agent_config_file['orb']['backends']['pktvisor'].update({"api_port": f"{port}"}) @@ -553,7 +584,7 @@ def create_agent_config_file(token, agent_name, iface, agent_tags, orb_url, base dir_path = configs.get("local_orb_path") with open(f"{dir_path}/{agent_name}.yaml", "w+") as f: f.write(agent_config_file) - return agent_name, tags + return agent_name, tags, tap @threading_wait_until diff --git a/python-test/features/steps/control_plane_policies.py b/python-test/features/steps/control_plane_policies.py index 3c38d8956..c037465ba 100644 --- a/python-test/features/steps/control_plane_policies.py +++ b/python-test/features/steps/control_plane_policies.py @@ -18,13 +18,26 @@ @step("a new policy is created using: {kwargs}") def create_new_policy(context, kwargs): - kwargs_dict = parse_policy_params(kwargs) - policy_json = make_policy_json(kwargs_dict["name"], kwargs_dict['handle_label'], - kwargs_dict["handler"], kwargs_dict["description"], kwargs_dict["tap"], - kwargs_dict["input_type"], kwargs_dict["host_specification"], - kwargs_dict["bpf_filter_expression"], kwargs_dict["pcap_source"], - kwargs_dict["only_qname_suffix"], kwargs_dict["only_rcode"], - kwargs_dict["exclude_noerror"], kwargs_dict["backend_type"]) + if kwargs.split(", ")[-1].split("=")[-1] == "flow": + kwargs_dict = parse_flow_policy_params(kwargs) + else: + kwargs_dict = parse_policy_params(kwargs) + if kwargs_dict["handler"] == "flow": + policy_json = make_policy_flow_json(kwargs_dict['name'], kwargs_dict['handle_label'], kwargs_dict['handler'], + kwargs_dict['description'], + kwargs_dict['tap'], kwargs_dict['input_type'], kwargs_dict['port'], + kwargs_dict['bind'], kwargs_dict['flow_type'], + kwargs_dict['sample_rate_scaling'], kwargs_dict['only_devices'], + kwargs_dict['only_ips'], kwargs_dict['only_ports'], + kwargs_dict['only_interfaces'], kwargs_dict['geoloc_notfound'], + kwargs_dict['asn_notfound'], 
kwargs_dict['backend_type']) + else: + policy_json = make_policy_json(kwargs_dict["name"], kwargs_dict['handle_label'], + kwargs_dict["handler"], kwargs_dict["description"], kwargs_dict["tap"], + kwargs_dict["input_type"], kwargs_dict["host_specification"], + kwargs_dict["bpf_filter_expression"], kwargs_dict["pcap_source"], + kwargs_dict["only_qname_suffix"], kwargs_dict["only_rcode"], + kwargs_dict["exclude_noerror"], kwargs_dict["backend_type"]) context.policy = create_policy(context.token, policy_json) @@ -262,6 +275,24 @@ def apply_n_policies(context, amount_of_policies, type_of_policies): create_new_dataset(context, 1, 'last', 1, 'sink') +@step('{amount_of_policies} {type_of_policies} policies {policies_input} are applied to the group') +def apply_n_policies(context, amount_of_policies, type_of_policies, policies_input): + if "same tap as created via config file" in policies_input: + policies_input = list(context.tap.values())[0]['input_type'] + args_for_policies = return_policies_type(int(amount_of_policies), type_of_policies, policies_input) + if "tap" in context: + tap_name = list(context.tap.keys())[0] + input_type = list(context.tap.values())[0]['input_type'] + else: + context.tap_name = tap_name = f"default_tap_before_provision_{random_string(10)}" + input_type = policies_input + for i in range(int(amount_of_policies)): + kwargs = f"{args_for_policies[i][1]}, tap={tap_name}, input_type={input_type}" + create_new_policy(context, kwargs) + check_policies(context) + create_new_dataset(context, 1, 'last', 1, 'sink') + + @step('{amount_of_policies} {type_of_policies} policies are applied to the group by {amount_of_datasets} datasets each') def apply_n_policies_x_times(context, amount_of_policies, type_of_policies, amount_of_datasets): for n in range(int(amount_of_policies)): @@ -485,6 +516,59 @@ def make_policy_json(name, handler_label, handler, description=None, tap="defaul return json_request +def make_policy_flow_json(name, handler_label, handler, description=None, tap="default_flow", + input_type="flow", port=None, bind=None, flow_type=None, sample_rate_scaling=None, + only_devices=None, only_ips=None, only_ports=None, only_interfaces=None, geoloc_notfound=None, + asn_notfound=None, backend_type="pktvisor"): + """ + + Generate a policy json + + :param (str) name: of the policy to be created + :param (str) handler_label: of the handler + :param (str) handler: to be added + :param (str) description: description of policy + :param tap: named, host specific connection specifications for the raw input streams accessed by pktvisor + :param input_type: this must reference a tap name, or application of the policy will fail + :param backend_type: Agent backend this policy is for. Cannot change once created. 
Default: pktvisor + :return: (dict) a dictionary containing the created policy data + """ + assert_that(handler, equal_to("flow"), "Unexpected handler for policy") + assert_that(name, not_none(), "Unable to create policy without name") + + json_request = {"name": name, + "description": description, + "backend": backend_type, + "policy": { + "kind": "collection", + "input": { + "tap": tap, + "input_type": input_type, + "config": {"port": port, + "bind": bind, + "only_ports": only_ports, + "flow_type": flow_type}}, + "handlers": { + "modules": { + handler_label: { + "type": handler, + "filter": {"only_devices": only_devices, + "only_ips": only_ips, + "only_ports": only_ports, + "only_interfaces": only_interfaces, + "geoloc_notfound": geoloc_notfound, + "asn_notfound": asn_notfound}, + "config": { + "sample_rate_scaling": sample_rate_scaling} + } + } + } + } + } + json_request = remove_empty_from_json(json_request.copy()) + return json_request + + def get_policy(token, policy_id, expected_status_code=200): """ Gets a policy from Orb control plane @@ -691,27 +775,35 @@ def list_datasets_for_a_policy(policy_id, datasets_list): return id_of_related_datasets -def return_policies_type(k, policies_type='mixed'): +def return_policies_type(k, policies_type='mixed', input_type="pcap"): assert_that(policies_type, any_of(equal_to('mixed'), any_of('simple'), any_of('advanced')), "Unexpected value for policies type") - advanced = { - 'advanced_dns_libpcap_0': "handler=dns, description='policy_dns', host_specification=10.0.1.0/24,10.0.2.1/32,2001:db8::/64, bpf_filter_expression=udp port 53, pcap_source=libpcap, only_qname_suffix=[.orb.live/ .google.com], only_rcode=0", - 'advanced_dns_libpcap_2': "handler=dns, description='policy_dns', host_specification=10.0.1.0/24,10.0.2.1/32,2001:db8::/64, bpf_filter_expression=udp port 53, pcap_source=libpcap, only_qname_suffix=[.orb.live/ .google.com], only_rcode=2", - 'advanced_dns_libpcap_3': "handler=dns, description='policy_dns', host_specification=10.0.1.0/24,10.0.2.1/32,2001:db8::/64, bpf_filter_expression=udp port 53, pcap_source=libpcap, only_qname_suffix=[.orb.live/ .google.com], only_rcode=3", - 'advanced_dns_libpcap_5': "handler=dns, description='policy_dns', host_specification=10.0.1.0/24,10.0.2.1/32,2001:db8::/64, bpf_filter_expression=udp port 53, pcap_source=libpcap, only_qname_suffix=[.orb.live/ .google.com], only_rcode=5", + if input_type == "flow": + advanced = { + "advanced_flow": "handler=flow, description='policy_flow', asn_notfound=true, sample_rate_scaling=true" + } + simple = { + 'simple_flow': "handler=flow" + } + else: + advanced = { + 'advanced_dns_libpcap_0': "handler=dns, description='policy_dns', host_specification=10.0.1.0/24,10.0.2.1/32,2001:db8::/64, bpf_filter_expression=udp port 53, pcap_source=libpcap, only_qname_suffix=[.orb.live/ .google.com], only_rcode=0", + 'advanced_dns_libpcap_2': "handler=dns, description='policy_dns', host_specification=10.0.1.0/24,10.0.2.1/32,2001:db8::/64, bpf_filter_expression=udp port 53, pcap_source=libpcap, only_qname_suffix=[.orb.live/ .google.com], only_rcode=2", + 'advanced_dns_libpcap_3': "handler=dns, description='policy_dns', host_specification=10.0.1.0/24,10.0.2.1/32,2001:db8::/64, bpf_filter_expression=udp port 53, pcap_source=libpcap, only_qname_suffix=[.orb.live/ .google.com], only_rcode=3", + 'advanced_dns_libpcap_5': "handler=dns, description='policy_dns', host_specification=10.0.1.0/24,10.0.2.1/32,2001:db8::/64, bpf_filter_expression=udp port 53, pcap_source=libpcap, 
only_qname_suffix=[.orb.live/ .google.com], only_rcode=5",
 
-        'advanced_net': "handler=net, description='policy_net', host_specification=10.0.1.0/24,10.0.2.1/32,2001:db8::/64, bpf_filter_expression=udp port 53, pcap_source=libpcap",
+            'advanced_net': "handler=net, description='policy_net', host_specification=10.0.1.0/24,10.0.2.1/32,2001:db8::/64, bpf_filter_expression=udp port 53, pcap_source=libpcap",
 
-        'advanced_dhcp': "handler=dhcp, description='policy_dhcp', host_specification=10.0.1.0/24,10.0.2.1/32,2001:db8::/64, bpf_filter_expression=udp port 53, pcap_source=libpcap",
-    }
+            'advanced_dhcp': "handler=dhcp, description='policy_dhcp', host_specification=10.0.1.0/24,10.0.2.1/32,2001:db8::/64, bpf_filter_expression=udp port 53, pcap_source=libpcap",
+        }
 
-    simple = {
+        simple = {
 
-        'simple_dns': "handler=dns",
-        'simple_net': "handler=net",
-        # 'simple_dhcp': "handler=dhcp",
-    }
+            'simple_dns': "handler=dns",
+            'simple_net': "handler=net",
+            # 'simple_dhcp': "handler=dhcp",
+        }
 
     mixed = dict()
 
     mixed.update(advanced)
@@ -830,3 +922,26 @@ def parse_policy_params(kwargs):
         kwargs_dict['handle_label'] = f"default_{kwargs_dict['handler']}_{random_string(3)}"
 
     return kwargs_dict
+
+
+def parse_flow_policy_params(kwargs):
+    name = policy_name_prefix + random_string(10)
+
+    kwargs_dict = {'name': name, 'handler': None, 'description': None, 'tap': "default_flow",
+                   'input_type': "flow", 'port': None, 'bind': None, 'flow_type': None, 'sample_rate_scaling': None,
+                   'only_devices': None, 'only_ips': None, 'only_ports': None, 'only_interfaces': None,
+                   'geoloc_notfound': None,
+                   'asn_notfound': None, 'backend_type': "pktvisor"}
+
+    for i in kwargs.split(", "):
+        assert_that(i, matches_regexp("^.+=.+$"), f"Unexpected format for param {i}")
+        item = i.split("=")
+        kwargs_dict[item[0]] = item[1]
+
+    if policy_name_prefix not in kwargs_dict["name"]:
+        kwargs_dict["name"] = policy_name_prefix + kwargs_dict["name"]
+
+    assert_that(kwargs_dict["handler"], equal_to("flow"), "Unexpected handler for policy")
+    kwargs_dict['handle_label'] = f"default_{kwargs_dict['handler']}_{random_string(3)}"
+
+    return kwargs_dict
diff --git a/python-test/features/steps/taps.py b/python-test/features/steps/taps.py
new file mode 100644
index 000000000..d3552b037
--- /dev/null
+++ b/python-test/features/steps/taps.py
@@ -0,0 +1,62 @@
+from hamcrest import *
+
+
+class Taps:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def pcap(cls, name, input_type="pcap", settings=None):
+        available_options = ["pcap_source", "iface", "host_spec", "debug", "bpf"]
+        filters_list = ["bpf"]
+
+        return make_tap(name, input_type, available_options, settings, filters_list)
+
+    @classmethod
+    def flow(cls, name, input_type="flow", settings=None):
+        available_options = ["port", "bind", "flow_type"]
+
+        return make_tap(name, input_type, available_options, settings)
+
+    @classmethod
+    def dnstap(cls, name, input_type="dnstap", settings=None):
+        available_options = ["socket", "tcp", "only_hosts"]
+
+        filters_list = ["only_hosts"]
+
+        return make_tap(name, input_type, available_options, settings, filters_list)
+
+
+def make_tap(name, input_type, available_options, settings, filters_list=None):
+    if filters_list is None:
+        filters_list = []
+    kwargs_configs = list(settings.keys())
+
+    assert_that(set(kwargs_configs).issubset(available_options), is_(True),
+                f"Invalid configuration to tap {input_type}. \n "
+                f"Options are: {available_options}. \n"
+                f"Passed: {kwargs_configs}")
+
+    filters = None
+    for tap_filter in filters_list:
+        if tap_filter in kwargs_configs:
+            filters = {tap_filter: settings[tap_filter]}
+            kwargs_configs.remove(tap_filter)
+
+    if len(kwargs_configs) > 0:
+        configs = dict()
+    else:
+        configs = None
+    for configuration in kwargs_configs:
+        configs.update({configuration: settings[configuration]})
+
+    tap = {name: {"input_type": input_type}}
+
+    if filters is not None:
+        filters = {"filter": filters}
+        tap[name].update(filters)
+
+    if configs is not None:
+        configs = {"config": configs}
+        tap[name].update(configs)
+    return tap