diff --git a/configure.ac b/configure.ac index 3a76ece0e39d..da042f97d55b 100644 --- a/configure.ac +++ b/configure.ac @@ -1915,11 +1915,15 @@ fi fi - # Napatech - Using the 3GD API + # Napatech - Using the 3GD API AC_ARG_ENABLE(napatech, AS_HELP_STRING([--enable-napatech],[Enabled Napatech Devices]), [ enable_napatech=$enableval ], [ enable_napatech=no]) + AC_ARG_ENABLE(napatech_bypass, + AS_HELP_STRING([--disable-napatech-bypass],[Disable Bypass feature on Napatech cards]), + [ napatech_bypass=$enableval ], + [ napatech_bypass=yes]) AC_ARG_WITH(napatech_includes, [ --with-napatech-includes=DIR napatech include directory], [with_napatech_includes="$withval"],[with_napatech_includes="/opt/napatech3/include"]) @@ -1957,6 +1961,17 @@ fi AC_DEFINE([HAVE_NAPATECH],[1],(Napatech capture card support)) + if test "$napatech_bypass" = "yes"; then + AC_CHECK_LIB(ntapi, NT_FlowOpenAttrInit,NTFLOW="yes",NTFLOW="no") + if test "$NTFLOW" = "yes"; then + echo " Napatech Flow Processing is Enabled (--disable-napatech-bypass if not needed)" + AC_DEFINE([NAPATECH_ENABLE_BYPASS],[1],(Napatech flowdirector support)) + else + echo "Napatech Flow Processing is not available" + fi + else + echo "Napatech Flow Processing is Disabled." + fi fi # liblua @@ -2493,7 +2508,7 @@ fi if test "${enable_ebpf}" = "yes" || test "${enable_unittests}" = "yes"; then AC_DEFINE([CAPTURE_OFFLOAD_MANAGER], [1],[Building flow bypass manager code]) fi -if test "${enable_ebpf}" = "yes" || test "${enable_nfqueue}" = "yes" || test "${enable_pfring}" = "yes" || test "${enable_unittests}" = "yes"; then +if test "${enable_ebpf}" = "yes" || test "${enable_nfqueue}" = "yes" || test "${enable_pfring}" = "yes" || test "${enable_napatech}" = "yes" || test "${enable_unittests}" = "yes"; then AC_DEFINE([CAPTURE_OFFLOAD], [1],[Building flow capture bypass code]) fi diff --git a/doc/userguide/capture-hardware/napatech.rst b/doc/userguide/capture-hardware/napatech.rst index 3d13aa7ce8df..d08d28d386bd 100644 --- a/doc/userguide/capture-hardware/napatech.rst +++ b/doc/userguide/capture-hardware/napatech.rst @@ -91,7 +91,7 @@ the system can be configured: to the core on which the thread is running. 3. Manual-config (legacy): In this mode the underlying Napatech streams are configured - by issuing NTPL commands prior to running suricata. Suricata then connects + by issuing NTPL commands prior to running Suricata. Suricata then connects to the existing streams on startup. Example Configuration - Auto-config without cpu-affinity: @@ -99,15 +99,15 @@ Example Configuration - Auto-config without cpu-affinity: If cpu-affinity is not used it is necessary to explicitly define the streams in the Suricata configuration file. To use this option the following options should -be set in the suricata configuration file: +be set in the Suricata configuration file: 1. Turn off cpu-affinity - 2. Enable the napatech "auto-config" option + 2. Enable the Napatech "auto-config" option 3. Specify the streams that should be created on startup - 4. Specify the ports that will provide traffic to suricata + 4. Specify the ports that will provide traffic to Suricata 5. Specify the hashmode used to distribute traffic to the streams @@ -136,7 +136,7 @@ Stop and restart ntservice after making changes to ntservice:: $ /opt/napatech3/bin/ntstop.sh $ /opt/napatech3/bin/ntstart.sh -Now you are ready to start suricata:: +Now you are ready to start Suricata:: $ suricata -c /usr/local/etc/suricata/suricata.yaml --napatech --runmode workers @@ -148,8 +148,8 @@ worker-cpu-set. 
To use this option make the following changes to suricata.yaml: 1. Turn on cpu-affinity 2. Specify the worker-cpu-set -3. Enable the napatech "auto-config" option -4. Specify the ports that will provide traffic to suricata +3. Enable the Napatech "auto-config" option +4. Specify the ports that will provide traffic to Suricata 5. Specify the hashmode that will be used to control the distribution of traffic to the different streams/cpus. @@ -188,7 +188,7 @@ Stop and restart ntservice after making changes to ntservice:: $ /opt/napatech3/bin/ntstop.sh -m $ /opt/napatech3/bin/ntstart.sh -m -Now you are ready to start suricata:: +Now you are ready to start Suricata:: $ suricata -c /usr/local/etc/suricata/suricata.yaml --napatech --runmode workers @@ -196,13 +196,18 @@ Example Configuration - Manual Configuration -------------------------------------------- For Manual Configuration the Napatech streams are created by running NTPL -commands prior to running Suricata. In this example we will setup the Napatech -capture accelerator to merge all physical ports, and then distribute the merged -traffic to four streams that Suricata will ingest. +commands prior to running Suricata. + +Note that this option is provided primarily for legacy configurations, as previously +this was the only way to configure Napatech products. Newer capabilities such as +flow-awareness and inline processing cannot be configured manually. + +In this example we will set up the Napatech capture accelerator to merge all physical +ports, and then distribute the merged traffic to four streams that Suricata will ingest. The steps for this configuration are: - 1. Disable the napatech auto-config option in suricata.yaml - 2. Specify the streams that suricata is to use in suricata.yaml + 1. Disable the Napatech auto-config option in suricata.yaml + 2. Specify the streams that Suricata is to use in suricata.yaml 3. Create a file with NTPL commands to create the underlying Napatech streams. First suricata.yaml should be configured similar to the following:: @@ -232,27 +237,163 @@ Next execute those command using the ntpl tool:: $ /opt/napatech3/bin/ntpl -f -Now you are ready to start suricata:: +Now you are ready to start Suricata:: $ suricata -c /usr/local/etc/suricata/suricata.yaml --napatech --runmode workers It is possible to specify much more elaborate configurations using this option. Simply by -creating the appropriate NTPL file and attaching suricata to the streams. +creating the appropriate NTPL file and attaching Suricata to the streams. + +Bypassing Flows +--------------- + +On flow-aware Napatech products, traffic from individual flows can be automatically +dropped or, in the case of inline configurations, forwarded by the hardware after +an inspection of the initial packet(s) of the flow by Suricata. This will save +CPU cycles since Suricata does not process packets for a flow that has already been +adjudicated. This is enabled via the hardware-bypass option in the Napatech section +of the configuration file. +When hardware bypass is used, it is important that the ports accepting upstream +and downstream traffic from the network are configured with information on +which port the two sides of the connection will arrive. This is needed for the +hardware to properly process traffic in both directions.
This is indicated in the +"ports" section as a hyphen-separated list of port-pairs that will be receiving +upstream and downstream traffic. E.g.:: + + napatech: + hardware-bypass: true + ports[0-1,2-3] + +Note that these "port-pairings" are also required for IDS configurations as the hardware +needs to know on which port(s) the two sides of the connection will arrive. + +For configurations relying on optical taps, the two sides of the pairing will typically +be different ports. For SPAN port configurations where both upstream and downstream traffic +is delivered to a single port, both sides of the "port-pair" will reference the same port. + +For example, tap configurations have a form similar to this:: + + ports[0-1,2-3] + +Whereas for SPAN port configurations it would look similar to this:: + + ports[0-0,1-1,2-2,3-3] + +Note that SPAN and tap configurations may be combined on the same adapter. + +There are multiple ways that Suricata can be configured to bypass traffic. +One way is to enable stream.bypass in the configuration file. E.g.:: + + stream: + bypass: true + +When enabled, once Suricata has evaluated the first chunk of the stream (the +size of which is also configurable), it will indicate that the rest of the +packets in the flow can be bypassed. In IDS mode this means that the subsequent +packets of the flow will be dropped and not delivered to Suricata. In inline +operation the packets will be transmitted on the output port but not delivered +to Suricata. + +Another way is by specifying the "bypass" keyword in a rule. When a rule is +triggered with this keyword then the "pass" or "drop" action will be applied +to subsequent packets of the flow automatically without further analysis by +Suricata. For example, given the rule:: + + drop tcp any 443 <> any any (msg: "SURICATA Test rule"; bypass; sid:1000001; rev:2;) + +Once Suricata initially evaluates the first packet(s) and identifies the flow, +all subsequent packets from the flow will be dropped by the hardware; thus +saving CPU cycles for more important tasks. + +The timeout value for how long to wait before evicting stale flows from the +hardware flow table can be specified via the FlowTimeout attribute in ntservice.ini. + +Inline Operation +---------------- + +Napatech flow-aware products can be configured for inline operation. This is +specified in the configuration file. When enabled, ports are specified as +port-pairs. Traffic received from one port is transmitted out +the peer port after inspection by Suricata. E.g. the configuration:: + + napatech: + inline: enabled + ports[0-1, 2-3] + +Will pair ports 0 and 1, and ports 2 and 3, as peers. Rules can be defined to +pass traffic matching a given signature. For example, given the rule:: + + pass tcp any 443 <> any any (msg: "SURICATA Test rule"; bypass; sid:1000001; rev:2;) + +Suricata will evaluate the initial packet(s) of the flow and program the flow +into the hardware. Subsequent packets from the flow will automatically be +shunted from one port to its peer. + Counters -------- -For each stream that is being processed the following counters will be output in stats.log: +The following counters are available: + +- napa_total.pkts - The total count of packets received by the card. + +- napa_total.byte - The total count of bytes received by the card. + +- napa_total.overflow_drop_pkts - The number of packets that were dropped because + the host buffers were full. (I.e. the application is not able to process + packets quickly enough.)
+ +- napa_total.overflow_drop_byte - The number of bytes that were dropped because + the host buffers were full. (I.e. the application is not able to process + packets quickly enough.) + +On flow-aware products the following counters are also available: + +- napa_dispatch_host.pkts, napa_dispatch_host.byte: + + The total number of packets/bytes that were dispatched to a host buffer for + processing by Suricata. (Note: this count includes packets that may be + subsequently dropped if there is no room in the host buffer.) + +- napa_dispatch_drop.pkts, napa_dispatch_drop.byte: + + The total number of packets/bytes that were dropped at the hardware as + a result of a Suricata "drop" bypass rule or other adjudication by + Suricata that the flow packets should be dropped. These packets are not + delivered to the application. + +- napa_dispatch_fwd.pkts, napa_dispatch_fwd.byte: + + When inline operation is configured, this is the total number of packets/bytes + that were forwarded as a result of a Suricata "pass" bypass rule or as a result + of stream or encryption bypass being enabled in the configuration file. + These packets were not delivered to the application. + +- napa_bypass.active_flows: + + The number of flows actively programmed on the hardware to be forwarded or dropped. + +- napa_bypass.total_flows: -- nt.pkts - The number of packets recieved by the stream. + The total count of flows programmed since the application started. -- nt.bytes - The total bytes received by the stream. +If enable-stream-stats is enabled in the configuration file then, for each stream +that is being processed, the following counters will be output in stats.log: -- nt.drop - The number of packets that were dropped from this stream due to buffer overflow conditions. +- napa.pkts: The number of packets received by the stream. + +- napa.bytes: The total bytes received by the stream. + +- napa.drop_pkts: The number of packets dropped from this stream due to buffer overflow conditions. + +- napa.drop_byte: The number of bytes dropped from this stream due to buffer overflow conditions. + +This is useful for fine-grained debugging to determine if a specific CPU core or +thread is falling behind, resulting in dropped packets. If hba is enabled the following counter will also be provided: -- nt.hba_drop - the number of packets dropped because the host buffer allowance high-water mark was reached. +- napa.hba_drop: The number of packets dropped because the host buffer allowance high-water mark was reached. In addition to counters host buffer utilization is tracked and logged. This is also useful for debugging. Log messages are output for both Host and On-Board buffers when reach 25, 50, 75 @@ -263,11 +404,11 @@ Debugging: For debugging configurations it is useful to see what traffic is flowing as well as what streams are created and receiving traffic. There are two tools in /opt/napatech3/bin that are useful for this: - - monitoring: this tool will, among other things, show what traffic is arriving at the port interfaces. +- monitoring: this tool will, among other things, show what traffic is arriving at the port interfaces. - - profiling: this will show host-buffers, streams and traffic flow to the streams. +- profiling: this will show host-buffers, streams and traffic flow to the streams. -If suricata terminates abnormally stream definitions, which are normally removed at shutdown, may remain in effect. +If Suricata terminates abnormally, stream definitions, which are normally removed at shutdown, may remain in effect.
If this happens they can be cleared by issuing the "delete=all" NTPL command as follows:: # /opt/napatech3/bin/ntpl -e "delete=all" @@ -275,7 +416,7 @@ If this happens they can be cleared by issuing the "delete=all" NTPL command as Napatech configuration options: ------------------------------- -These are the Napatech options available in the suricata configuration file:: +These are the Napatech options available in the Suricata configuration file:: napatech: # The Host Buffer Allowance for all streams @@ -289,31 +430,64 @@ These are the Napatech options available in the suricata configuration file:: # When set to "no" the streams config array will be used. # # This option necessitates running the appropriate NTPL commands to create - # the desired streams prior to running suricata. + # the desired streams prior to running Suricata. #use-all-streams: no - # The streams to listen on when cpu-affinity or auto-config is disabled. - # This can be either: - # a list of individual streams (e.g. streams: [0,1,2,3]) + # The streams to listen on when auto-config is disabled or when threading + # cpu-affinity is disabled. This can be either: + # an individual stream (e.g. streams: [0]) # or # a range of streams (e.g. streams: ["0-3"]) # - #streams: ["0-7"] + streams: ["0-3"] + + # Stream stats can be enabled to provide fine-grained packet and byte counters + # for each thread/stream that is configured. # - # When auto-config is enabled the streams will be created and assigned to the - # NUMA node where the thread resides automatically. The streams will be created + enable-stream-stats: no + + # When auto-config is enabled the streams will be created and assigned + # automatically to the NUMA node where the thread resides. If cpu-affinity + # is enabled in the threading section, then the streams will be created # according to the number of worker threads specified in the worker cpu set. - # (I.e. the value of threading.cpu-affinity.worker-cpu-set.cpu.) + # Otherwise, the streams array is used to define the streams. # # This option cannot be used simultaneous with "use-all-streams". # auto-config: yes + + # Enable hardware level flow bypass. + # + hardware-bypass: yes + + # Enable inline operation. When enabled, traffic arriving on a given port is + # automatically forwarded out its peer port after analysis by Suricata. + # hardware-bypass must be enabled when this is enabled. # + inline: no + # Ports indicates which napatech ports are to be used in auto-config mode. - # these are the port ID's of the ports that will merged prior to the traffic - # being distributed to the streams. + # These are the port IDs of the ports that will be merged prior to the + # traffic being distributed to the streams. + # + # When hardware-bypass is enabled, the ports must be configured as a segment + # specifying the port(s) on which upstream and downstream traffic will arrive. + # This information is necessary for the hardware to properly process flows. + # + # When using a tap configuration, one of the ports will receive inbound traffic + # for the network and the other will receive outbound traffic. The two ports on a + # given segment must reside on the same network adapter. # + # When using a SPAN-port configuration, the upstream and downstream traffic + # arrives on a single port. This is configured by setting the two sides of the + # segment to reference the same port. (e.g. 0-0 to configure a SPAN port on + # port 0).
+ # + # port segments are specified in the form: + # ports: [0-1,2-3,4-5,6-6,7-7] + # + # For legacy systems when hardware-bypass is disabled this can be specified in any + # of the following ways: # # a list of individual ports (e.g. ports: [0,1,2,3]) # @@ -322,10 +496,10 @@ These are the Napatech options available in the suricata configuration file:: # "all" to indicate that all ports are to be merged together # (e.g. ports: [all]) # - # This has no effect if auto-config is disabled. - # - ports: [all] + # This parameter has no effect if auto-config is disabled. # + ports: [0-1,2-3] + # When auto-config is enabled the hashmode specifies the algorithm for # determining to which stream a given packet is to be delivered. # This can be any valid Napatech NTPL hashmode command. @@ -335,13 +509,13 @@ These are the Napatech options available in the suricata configuration file:: # # See Napatech NTPL documentation other hashmodes and details on their use. # - # This has no effect if auto-config is disabled. + # This parameter has no effect if auto-config is disabled. # hashmode: hash5tuplesorted *Note: hba is useful only when a stream is shared with another application. When hba is enabled packets will be dropped -(i.e. not delivered to suricata) when the host-buffer utilization reaches the high-water mark indicated by the hba value. -This insures that, should suricata get behind in it's packet processing, the other application will still receive all +(i.e. not delivered to Suricata) when the host-buffer utilization reaches the high-water mark indicated by the hba value. +This insures that, should Suricata get behind in its packet processing, the other application will still receive all of the packets. If this is enabled without another application sharing the stream it will result in sub-optimal packet buffering.* diff --git a/src/runmode-napatech.c b/src/runmode-napatech.c index e017e135cea3..d339b2f7d58f 100644 --- a/src/runmode-napatech.c +++ b/src/runmode-napatech.c @@ -40,6 +40,8 @@ #define NT_RUNMODE_AUTOFP 1 #define NT_RUNMODE_WORKERS 2 +static const char *default_mode = "workers"; + #ifdef HAVE_NAPATECH #define MAX_STREAMS 256 @@ -47,6 +49,7 @@ static uint16_t num_configured_streams = 0; static uint16_t first_stream = 0xffff; static uint16_t last_stream = 0xffff; static int auto_config = 0; +static int use_hw_bypass = 0; uint16_t NapatechGetNumConfiguredStreams(void) { @@ -68,11 +71,16 @@ bool NapatechIsAutoConfigEnabled(void) return (auto_config != 0); } +bool NapatechUseHWBypass(void) +{ + return (use_hw_bypass != 0); +} + #endif const char *RunModeNapatechGetDefaultMode(void) { - return "workers"; + return default_mode; } void RunModeNapatechRegister(void) @@ -103,8 +111,25 @@ static int NapatechRegisterDeviceStreams(void) SCLogInfo("napatech.auto-config not found in config file. Defaulting to disabled."); } + if (ConfGetBool("napatech.hardware-bypass", &use_hw_bypass) == 0) { + SCLogInfo("napatech.hardware-bypass not found in config file. Defaulting to disabled."); + } + + /* use_all_streams uses existing streams created prior to starting Suricata. auto_config + * automatically creates streams. Therefore, these two options are mutually exclusive. 
+ */ if (use_all_streams && auto_config) { - SCLogError(SC_ERR_RUNMODE, "auto-config cannot be used with use-all-streams."); + SCLogError(SC_ERR_RUNMODE, "napatech.auto-config cannot be used in the configuration file at the same time as napatech.use-all-streams."); + exit(EXIT_FAILURE); + } + + /* To use hardware bypass we need the stream configuration to be consistent + * with the rest of the configuration. Therefore auto_config must be enabled + * when hardware bypass is used. + */ + if (use_hw_bypass && auto_config == 0) { + SCLogError(SC_ERR_RUNMODE, "napatech.auto-config must be enabled when using napatech.hardware-bypass."); + exit(EXIT_FAILURE); } /* Get the stream ID's either from the conf or by querying Napatech */ @@ -129,7 +154,9 @@ static int NapatechRegisterDeviceStreams(void) "Registering Napatech device: %s - active stream found.", plive_dev_buf); SCLogError(SC_ERR_NAPATECH_STREAMS_REGISTER_FAILED, - "Delete the stream or disable auto-config before running."); + "run /opt/napatech3/bin/ntpl -e \"delete=all\" to delete existing streams"); + SCLogError(SC_ERR_NAPATECH_STREAMS_REGISTER_FAILED, + "or disable auto-config in the conf file before running."); exit(EXIT_FAILURE); } } else { @@ -218,6 +245,24 @@ static int NapatechInit(int runmode) SCLogInfo("Host Buffer Allowance: %d", (int) conf->hba); } + if (use_hw_bypass) { +#ifdef NAPATECH_ENABLE_BYPASS + if (NapatechInitFlowStreams()) { + SCLogInfo("Napatech Hardware Bypass is supported and enabled."); + } else { + SCLogError(SC_ERR_NAPATECH_PARSE_CONFIG, + "Napatech Hardware Bypass requested in conf but is not supported by the hardware."); + exit(EXIT_FAILURE); + } +#else + SCLogError(SC_ERR_NAPATECH_PARSE_CONFIG, + "Napatech Hardware Bypass requested in conf but bypass support was not compiled in."); + exit(EXIT_FAILURE); +#endif + } else { + SCLogInfo("Hardware Bypass is disabled in the conf file."); + } + /* Start a thread to process the statistics */ NapatechStartStats(); diff --git a/src/runmode-napatech.h b/src/runmode-napatech.h index 1270c9468cce..b80d77732500 100644 --- a/src/runmode-napatech.h +++ b/src/runmode-napatech.h @@ -29,9 +29,7 @@ #ifdef HAVE_NAPATECH #include "util-napatech.h" #include - - -#endif +#endif /* HAVE_NAPATECH */ int RunModeNapatechAutoFp(void); int RunModeNapatechWorkers(void); @@ -41,9 +39,8 @@ const char *RunModeNapatechGetDefaultMode(void); uint16_t NapatechGetNumConfiguredStreams(void); uint16_t NapatechGetNumFirstStream(void); uint16_t NapatechGetNumLastStream(void); - bool NapatechIsAutoConfigEnabled(void); - +bool NapatechUseHWBypass(void); #endif /* __RUNMODE_NAPATECH_H__ */ diff --git a/src/source-napatech.c b/src/source-napatech.c index 2b9ba58ec2bb..790af284ee8e 100644 --- a/src/source-napatech.c +++ b/src/source-napatech.c @@ -18,14 +18,13 @@ /** * \file * -- * \author nPulse Technologies, LLC. -- * \author Matt Keeler + - * \author nPulse Technologies, LLC. + - * \author Matt Keeler * * * Support for NAPATECH adapter with the 3GD Driver/API. * Requires libntapi from Napatech A/S.
* */ - #include "suricata-common.h" #include "suricata.h" #include "threadvars.h" @@ -40,7 +39,7 @@ #ifndef HAVE_NAPATECH -TmEcode NoNapatechSupportExit(ThreadVars *, const void *, void **); +TmEcode NoNapatechSupportExit(ThreadVars*, const void*, void**); void TmModuleNapatechStreamRegister(void) { @@ -69,20 +68,21 @@ TmEcode NoNapatechSupportExit(ThreadVars *tv, const void *initdata, void **data) { SCLogError(SC_ERR_NAPATECH_NOSUPPORT, "Error creating thread %s: you do not have support for Napatech adapter " - "enabled please recompile with --enable-napatech", tv->name); + "enabled please recompile with --enable-napatech", + tv->name); exit(EXIT_FAILURE); } #else /* Implied we do have NAPATECH support */ + #include #include -#define MAX_STREAMS 256 - extern int max_pending_packets; -typedef struct NapatechThreadVars_ { +typedef struct NapatechThreadVars_ +{ ThreadVars *tv; NtNetStreamRx_t rx_stream; uint16_t stream_id; @@ -90,10 +90,13 @@ typedef struct NapatechThreadVars_ { TmSlot *slot; } NapatechThreadVars; +#ifdef NAPATECH_ENABLE_BYPASS +static int NapatechBypassCallback(Packet *p); +#endif TmEcode NapatechStreamThreadInit(ThreadVars *, const void *, void **); void NapatechStreamThreadExitStats(ThreadVars *, void *); -TmEcode NapatechPacketLoopZC(ThreadVars *tv, void *data, void *slot); +TmEcode NapatechPacketLoop(ThreadVars *tv, void *data, void *slot); TmEcode NapatechDecodeThreadInit(ThreadVars *, const void *, void **); TmEcode NapatechDecodeThreadDeinit(ThreadVars *tv, void *data); @@ -115,6 +118,12 @@ SC_ATOMIC_DECLARE(uint16_t, numa1_count); SC_ATOMIC_DECLARE(uint16_t, numa2_count); SC_ATOMIC_DECLARE(uint16_t, numa3_count); +SC_ATOMIC_DECLARE(uint64_t, flow_callback_cnt); +SC_ATOMIC_DECLARE(uint64_t, flow_callback_handled_pkts); +SC_ATOMIC_DECLARE(uint64_t, flow_callback_udp_pkts); +SC_ATOMIC_DECLARE(uint64_t, flow_callback_tcp_pkts); +SC_ATOMIC_DECLARE(uint64_t, flow_callback_unhandled_pkts); + /** * \brief Register the Napatech receiver (reader) module. 
*/ @@ -123,7 +132,7 @@ void TmModuleNapatechStreamRegister(void) tmm_modules[TMM_RECEIVENAPATECH].name = "NapatechStream"; tmm_modules[TMM_RECEIVENAPATECH].ThreadInit = NapatechStreamThreadInit; tmm_modules[TMM_RECEIVENAPATECH].Func = NULL; - tmm_modules[TMM_RECEIVENAPATECH].PktAcqLoop = NapatechPacketLoopZC; + tmm_modules[TMM_RECEIVENAPATECH].PktAcqLoop = NapatechPacketLoop; tmm_modules[TMM_RECEIVENAPATECH].PktAcqBreakLoop = NULL; tmm_modules[TMM_RECEIVENAPATECH].ThreadExitPrintStats = NapatechStreamThreadExitStats; tmm_modules[TMM_RECEIVENAPATECH].ThreadDeinit = NapatechStreamThreadDeinit; @@ -140,6 +149,12 @@ void TmModuleNapatechStreamRegister(void) SC_ATOMIC_INIT(numa1_count); SC_ATOMIC_INIT(numa2_count); SC_ATOMIC_INIT(numa3_count); + + SC_ATOMIC_INIT(flow_callback_cnt); + SC_ATOMIC_INIT(flow_callback_handled_pkts); + SC_ATOMIC_INIT(flow_callback_udp_pkts); + SC_ATOMIC_INIT(flow_callback_tcp_pkts); + SC_ATOMIC_INIT(flow_callback_unhandled_pkts); } /** @@ -157,6 +172,437 @@ void TmModuleNapatechDecodeRegister(void) tmm_modules[TMM_DECODENAPATECH].flags = TM_FLAG_DECODE_TM; } +#ifdef NAPATECH_ENABLE_BYPASS +/** + * \brief template of IPv4 header + */ +struct ipv4_hdr +{ + uint8_t version_ihl; /**< version and header length */ + uint8_t type_of_service; /**< type of service */ + uint16_t total_length; /**< length of packet */ + uint16_t packet_id; /**< packet ID */ + uint16_t fragment_offset; /**< fragmentation offset */ + uint8_t time_to_live; /**< time to live */ + uint8_t next_proto_id; /**< protocol ID */ + uint16_t hdr_checksum; /**< header checksum */ + uint32_t src_addr; /**< source address */ + uint32_t dst_addr; /**< destination address */ +} __attribute__ ((__packed__)); + +/** + * \brief template of IPv6 header + */ +struct ipv6_hdr +{ + uint32_t vtc_flow; /**< IP version, traffic class & flow label. */ + uint16_t payload_len; /**< IP packet length - includes sizeof(ip_header). */ + uint8_t proto; /**< Protocol, next header. */ + uint8_t hop_limits; /**< Hop limits. */ + uint8_t src_addr[16]; /**< IP address of source host. */ + uint8_t dst_addr[16]; /**< IP address of destination host(s). */ +} __attribute__ ((__packed__)); + +/** + * \brief template of UDP header + */ +struct udp_hdr +{ + uint16_t src_port; /**< UDP source port. */ + uint16_t dst_port; /**< UDP destination port. */ + uint16_t dgram_len; /**< UDP datagram length */ + uint16_t dgram_cksum; /**< UDP datagram checksum */ +} __attribute__ ((__packed__)); + +/** + * \brief template of TCP header + */ +struct tcp_hdr +{ + uint16_t src_port; /**< TCP source port. */ + uint16_t dst_port; /**< TCP destination port. */ + uint32_t sent_seq; /**< TX data sequence number. */ + uint32_t recv_ack; /**< RX data acknowledgement sequence number. */ + uint8_t data_off; /**< Data offset. */ + uint8_t tcp_flags; /**< TCP flags */ + uint16_t rx_win; /**< RX flow control window. */ + uint16_t cksum; /**< TCP checksum. */ + uint16_t tcp_urp; /**< TCP urgent pointer, if any. */ +} __attribute__ ((__packed__)); + + +/* The hardware will assign a "color" value indicating what filters are matched + * by a given packet. These constants indicate what bits are set in the color + * field for different protocols + * + */ +#define RTE_PTYPE_L2_ETHER 0x10000000 +#define RTE_PTYPE_L3_IPV4 0x01000000 +#define RTE_PTYPE_L3_IPV6 0x04000000 +#define RTE_PTYPE_L4_TCP 0x00100000 +#define RTE_PTYPE_L4_UDP 0x00200000 + +/* These masks are used to extract layer 3 and layer 4 protocol + * values from the color field in the packet descriptor. 
+ */ +#define RTE_PTYPE_L3_MASK 0x0f000000 +#define RTE_PTYPE_L4_MASK 0x00f00000 + +#define COLOR_IS_SPAN 0x00001000 + +static int inline_port_map[MAX_PORTS] = { -1 }; + +/** + * \brief Binds two ports together for inline operation. + * + * Get the ID of an adapter on which a given port resides. + * + * \param port one of the ports in a pairing. + * \param peer the other port in a pairing. + * \return ID of the adapter. + * + */ +int NapatechSetPortmap(int port, int peer) +{ + if ((inline_port_map[port] == -1) && (inline_port_map[peer] == -1)) { + inline_port_map[port] = peer; + inline_port_map[peer] = port; + } else { + SCLogError(SC_ERR_NAPATECH_PARSE_CONFIG, + "Port pairing is already configured."); + return 0; + } + return 1; +} + +/** + * \brief Returns the ID of the adapter + * + * Get the ID of an adapter on which a given port resides. + * + * \param port for which adapter ID is requested. + * \return ID of the adapter. + * + */ +int NapatechGetAdapter(uint8_t port) +{ + static int port_adapter_map[MAX_PORTS] = { -1 }; + int status; + NtInfo_t h_info; /* Info handle */ + NtInfoStream_t h_info_stream; /* Info stream handle */ + + if (unlikely(port_adapter_map[port] == -1)) { + if ((status = NT_InfoOpen(&h_info_stream, "ExampleInfo")) != NT_SUCCESS) { + NAPATECH_ERROR(SC_ERR_NAPATECH_OPEN_FAILED, status); + return -1; + } + /* Read the system info */ + h_info.cmd = NT_INFO_CMD_READ_PORT_V9; + h_info.u.port_v9.portNo = (uint8_t) port; + if ((status = NT_InfoRead(h_info_stream, &h_info)) != NT_SUCCESS) { + /* Get the status code as text */ + NAPATECH_ERROR(SC_ERR_NAPATECH_OPEN_FAILED, status); + NT_InfoClose(h_info_stream); + return -1; + } + port_adapter_map[port] = h_info.u.port_v9.data.adapterNo; + } + return port_adapter_map[port]; +} + +/** + * \brief IPv4 4-tuple convenience structure + */ +struct IPv4Tuple4 +{ + uint32_t sa; /*!< Source address */ + uint32_t da; /*!< Destination address */ + uint16_t sp; /*!< Source port */ + uint16_t dp; /*!< Destination port */ +}; + +/** + * \brief IPv6 4-tuple convenience structure + */ +struct IPv6Tuple4 +{ + uint8_t sa[16]; /*!< Source address */ + uint8_t da[16]; /*!< Destination address */ + uint16_t sp; /*!< Source port */ + uint16_t dp; /*!< Destination port */ +}; + + +/** + * \brief Compares the byte order value of two IPv6 addresses. + * + * + * \param addr_a The first address to compare + * \param addr_b The second adress to compare + * + * \return -1 if addr_a < addr_b + * 1 if addr_a > addr_b + * 0 if addr_a == addr_b + */ +static int CompareIPv6Addr(uint8_t addr_a[16], uint8_t addr_b[16]) { + uint16_t pos; + for (pos = 0; pos < 16; ++pos) { + if (addr_a[pos] < addr_b[pos]) { + return -1; + } else if (addr_a[pos] > addr_b[pos]) { + return 1; + } /* else they are equal - check next position*/ + } + + /* if we get here the addresses are equal */ + return 0; +} + +/** + * \brief Callback function to process Bypass events on Napatech Adapter. + * + * Callback function that sets up the Flow tables on the Napatech card + * so that subsequent packets from this flow are bypassed on the hardware. + * + * \param p packet containing information about the flow to be bypassed + * \param is_inline indicates if Suricata is being run in inline mode. + * + * \return Error code indicating success (1) or failure (0). 
+ * + */ +static int ProgramFlow(Packet *p, int is_inline) +{ + int status; + NtFlow_t flow_match; + memset(&flow_match, 0, sizeof(flow_match)); + + NapatechPacketVars *ntpv = &(p->ntpv); + + int adapter = NapatechGetAdapter(ntpv->dyn3->rxPort); + + NtFlowStream_t *phFlowStream = NapatechGetFlowStreamPtr(adapter); + + + /* + * The hardware decoder will "color" the packets according to the protocols + * in the packet and the port the packet arrived on. packet_type gets + * these bits and we mask out layer3, layer4, and is_span to determine + * the protocols and if the packet is coming in from a SPAN port. + */ + uint32_t packet_type = ((ntpv->dyn3->color_hi << 14) & 0xFFFFC000) | ntpv->dyn3->color_lo; + uint8_t *packet = (uint8_t *) ntpv->dyn3 + ntpv->dyn3->descrLength; + + uint32_t layer3 = packet_type & RTE_PTYPE_L3_MASK; + uint32_t layer4 = packet_type & RTE_PTYPE_L4_MASK; + uint32_t is_span = packet_type & COLOR_IS_SPAN; + + /* + * When we're programming the flows to arrive on a span port, + * where upstream and downstream packets arrive on the same port, + * the hardware is configured to swap the source and dest + * fields if the src addr > dest addr. We need to program the + * flow tables to match. We'll compare addresses and set + * do_swap accordingly. + */ + + uint32_t do_swap = 0; + + SC_ATOMIC_ADD(flow_callback_cnt, 1); + + /* Only bypass TCP and UDP */ + if (PKT_IS_TCP(p)) { + SC_ATOMIC_ADD(flow_callback_tcp_pkts, 1); + } else if PKT_IS_UDP(p) { + SC_ATOMIC_ADD(flow_callback_udp_pkts, 1); + } else { + SC_ATOMIC_ADD(flow_callback_unhandled_pkts, 1); + } + + struct IPv4Tuple4 v4Tuple; + struct IPv6Tuple4 v6Tuple; + struct ipv4_hdr *pIPv4_hdr = NULL; + struct ipv6_hdr *pIPv6_hdr = NULL; + + switch (layer3) { + case RTE_PTYPE_L3_IPV4: + { + pIPv4_hdr = (struct ipv4_hdr *) (packet + ntpv->dyn3->offset0); + + if (!is_span) { + v4Tuple.sa = pIPv4_hdr->src_addr; + v4Tuple.da = pIPv4_hdr->dst_addr; + } else { + do_swap = (pIPv4_hdr->src_addr > pIPv4_hdr->dst_addr); + if (!do_swap) { + /* already in order */ + v4Tuple.sa = pIPv4_hdr->src_addr; + v4Tuple.da = pIPv4_hdr->dst_addr; + } else { /* swap */ + v4Tuple.sa = pIPv4_hdr->dst_addr; + v4Tuple.da = pIPv4_hdr->src_addr; + } + } + break; + } + case RTE_PTYPE_L3_IPV6: + { + pIPv6_hdr = (struct ipv6_hdr *) (packet + ntpv->dyn3->offset0); + do_swap = (CompareIPv6Addr(pIPv6_hdr->src_addr, pIPv6_hdr->dst_addr) > 0); + + if (!is_span) { + memcpy(&(v6Tuple.sa), pIPv6_hdr->src_addr, 16); + memcpy(&(v6Tuple.da), pIPv6_hdr->dst_addr, 16); + } else { + /* sort src/dest address before programming */ + if (!do_swap) { + /* already in order */ + memcpy(&(v6Tuple.sa), pIPv6_hdr->src_addr, 16); + memcpy(&(v6Tuple.da), pIPv6_hdr->dst_addr, 16); + } else { /* swap the addresses */ + memcpy(&(v6Tuple.sa), pIPv6_hdr->dst_addr, 16); + memcpy(&(v6Tuple.da), pIPv6_hdr->src_addr, 16); + } + } + break; + } + default: + { + return 0; + } + } + + switch (layer4) { + case RTE_PTYPE_L4_TCP: + { + struct tcp_hdr *tcp_hdr = (struct tcp_hdr *) (packet + ntpv->dyn3->offset1); + if (layer3 == RTE_PTYPE_L3_IPV4) { + if (!is_span) { + v4Tuple.dp = tcp_hdr->dst_port; + v4Tuple.sp = tcp_hdr->src_port; + flow_match.keyId = NAPATECH_KEYTYPE_IPV4; + } else { + if (!do_swap) { + v4Tuple.sp = tcp_hdr->src_port; + v4Tuple.dp = tcp_hdr->dst_port; + } else { + v4Tuple.sp = tcp_hdr->dst_port; + v4Tuple.dp = tcp_hdr->src_port; + } + flow_match.keyId = NAPATECH_KEYTYPE_IPV4_SPAN; + } + memcpy(&(flow_match.keyData), &v4Tuple, sizeof(v4Tuple)); + } else { + if (!is_span) { + v6Tuple.dp = 
tcp_hdr->dst_port; + v6Tuple.sp = tcp_hdr->src_port; + flow_match.keyId = NAPATECH_KEYTYPE_IPV6; + } else { + if (!do_swap) { + v6Tuple.sp = tcp_hdr->src_port; + v6Tuple.dp = tcp_hdr->dst_port; + } else { + v6Tuple.dp = tcp_hdr->src_port; + v6Tuple.sp = tcp_hdr->dst_port; + } + flow_match.keyId = NAPATECH_KEYTYPE_IPV6_SPAN; + } + memcpy(&(flow_match.keyData), &v6Tuple, sizeof(v6Tuple)); + } + flow_match.ipProtocolField = 6; + break; + } + case RTE_PTYPE_L4_UDP: + { + struct udp_hdr *udp_hdr = (struct udp_hdr *) (packet + ntpv->dyn3->offset1); + if (layer3 == RTE_PTYPE_L3_IPV4) { + if (!is_span) { + v4Tuple.dp = udp_hdr->dst_port; + v4Tuple.sp = udp_hdr->src_port; + flow_match.keyId = NAPATECH_KEYTYPE_IPV4; + } else { + if (!do_swap) { + v4Tuple.sp = udp_hdr->src_port; + v4Tuple.dp = udp_hdr->dst_port; + } else { + v4Tuple.dp = udp_hdr->src_port; + v4Tuple.sp = udp_hdr->dst_port; + } + flow_match.keyId = NAPATECH_KEYTYPE_IPV4_SPAN; + } + memcpy(&(flow_match.keyData), &v4Tuple, sizeof(v4Tuple)); + } else { /* layer3 is IPV6 */ + if (!is_span) { + v6Tuple.dp = udp_hdr->dst_port; + v6Tuple.sp = udp_hdr->src_port; + flow_match.keyId = NAPATECH_KEYTYPE_IPV6; + } else { + if (!do_swap) { + v6Tuple.sp = udp_hdr->src_port; + v6Tuple.dp = udp_hdr->dst_port; + } else { + v6Tuple.dp = udp_hdr->src_port; + v6Tuple.sp = udp_hdr->dst_port; + } + flow_match.keyId = NAPATECH_KEYTYPE_IPV6_SPAN; + } + memcpy(&(flow_match.keyData), &v6Tuple, sizeof(v6Tuple)); + } + flow_match.ipProtocolField = 17; + break; + } + default: + { + return 0; + } + } + + flow_match.op = 1; /* program flow */ + flow_match.gfi = 1; /* Generate FlowInfo records */ + flow_match.tau = 1; /* tcp automatic unlearn */ + + if (PACKET_TEST_ACTION(p, ACTION_DROP)) { + flow_match.keySetId = NAPATECH_FLOWTYPE_DROP; + } else { + if (is_inline) { + flow_match.keySetId = NAPATECH_FLOWTYPE_PASS; + } else { + flow_match.keySetId = NAPATECH_FLOWTYPE_DROP; + } + } + + status = NT_FlowWrite(*phFlowStream, &flow_match, -1); + if (status == NT_STATUS_TIMEOUT) { + SCLogInfo("NT_FlowWrite returned NT_STATUS_TIMEOUT"); + } else if (status != NT_SUCCESS) { + SCLogError(SC_ERR_NAPATECH_OPEN_FAILED,"NT_FlowWrite failed!."); + exit(EXIT_FAILURE); + } + + return 1; +} + +/** + * \brief Callback from Suricata when a flow that should be bypassed + * is identified. + */ + +static int NapatechBypassCallback(Packet *p) +{ + NapatechPacketVars *ntpv = &(p->ntpv); + + /* + * Since, at this point, we don't know what action to take, + * simply mark this packet as one that should be + * bypassed when the packet is returned by suricata with a + * pass/drop verdict. + */ + ntpv->bypass = 1; + + return 1; +} + +#endif + /** * \brief Initialize the Napatech receiver thread, generate a single * NapatechThreadVar structure for each thread, this will @@ -198,12 +644,27 @@ TmEcode NapatechStreamThreadInit(ThreadVars *tv, const void *initdata, void **da static PacketQueue packets_to_release[MAX_STREAMS]; +/** + * \brief Callback to indicate that the packet buffer can be returned to the hardware. + * + * Called when Suricata is done processing the packet. The packet is placed into + * a queue so that it can be retrieved and released by the packet processing thread. + * + * \param p Packet to return to the system. + * + */ static void NapatechReleasePacket(struct Packet_ *p) { PacketFreeOrRelease(p); PacketEnqueue(&packets_to_release[p->ntpv.stream_id], p); } +/** + * \brief Returns the NUMA node associated with the currently running thread. + * + * \return ID of the NUMA node. 
+ * + */ static int GetNumaNode(void) { int cpu = 0; @@ -220,6 +681,12 @@ static int GetNumaNode(void) return node; } +/** + * \brief Outputs hints on the optimal host-buffer configuration to aid tuning. + * + * \param log_level of the currently running instance. + * + */ static void RecommendNUMAConfig(SCLogLevel log_level) { char string0[16]; @@ -236,17 +703,20 @@ static void RecommendNUMAConfig(SCLogLevel log_level) SCLog(log_level, __FILE__, __FUNCTION__, __LINE__, "Minimum host buffers that should be defined in ntservice.ini:"); - SCLog(log_level, __FILE__, __FUNCTION__, __LINE__, - " NUMA Node 0: %d", (SC_ATOMIC_GET(numa0_count))); + SCLog(log_level, __FILE__, __FUNCTION__, __LINE__, " NUMA Node 0: %d", + (SC_ATOMIC_GET(numa0_count))); - if (numa_max_node() >= 1) SCLog(log_level, __FILE__, __FUNCTION__, __LINE__, - " NUMA Node 1: %d ", (SC_ATOMIC_GET(numa1_count))); + if (numa_max_node() >= 1) + SCLog(log_level, __FILE__, __FUNCTION__, __LINE__, + " NUMA Node 1: %d ", (SC_ATOMIC_GET(numa1_count))); - if (numa_max_node() >= 2) SCLog(log_level, __FILE__, __FUNCTION__, __LINE__, - " NUMA Node 2: %d ", (SC_ATOMIC_GET(numa2_count))); + if (numa_max_node() >= 2) + SCLog(log_level, __FILE__, __FUNCTION__, __LINE__, + " NUMA Node 2: %d ", (SC_ATOMIC_GET(numa2_count))); - if (numa_max_node() >= 3) SCLog(log_level, __FILE__, __FUNCTION__, __LINE__, - " NUMA Node 3: %d ", (SC_ATOMIC_GET(numa3_count))); + if (numa_max_node() >= 3) + SCLog(log_level, __FILE__, __FUNCTION__, __LINE__, + " NUMA Node 3: %d ", (SC_ATOMIC_GET(numa3_count))); snprintf(string0, 16, "[%d, 16, 0]", SC_ATOMIC_GET(numa0_count)); snprintf(string1, 16, (numa_max_node() >= 1 ? ",[%d, 16, 1]" : ""), @@ -257,14 +727,23 @@ static void RecommendNUMAConfig(SCLogLevel log_level) SC_ATOMIC_GET(numa3_count)); SCLog(log_level, __FILE__, __FUNCTION__, __LINE__, - "E.g.: HostBuffersRx=%s%s%s%s", string0, string1, string2, string3); + "E.g.: HostBuffersRx=%s%s%s%s", string0, string1, string2, + string3); } else if (log_level == SC_LOG_ERROR) { SCLogError(SC_ERR_NAPATECH_STREAMS_REGISTER_FAILED, - "Or, try running /opt/napatech3/bin/ntpl -e \"delete=all\" to clean-up stream NUMA config."); + "Or, try running /opt/napatech3/bin/ntpl -e \"delete=all\" to clean-up stream NUMA config."); } } -TmEcode NapatechPacketLoopZC(ThreadVars *tv, void *data, void *slot) +/** + * \brief Main Napatechpacket processing loop + * + * \param tv Thread variable to ThreadVars + * \param data Pointer to NapatechThreadVars with data specific to Napatech + * \param slot TMSlot where this instance is running. + * + */ +TmEcode NapatechPacketLoop(ThreadVars *tv, void *data, void *slot) { int32_t status; char error_buffer[100]; @@ -274,27 +753,39 @@ TmEcode NapatechPacketLoopZC(ThreadVars *tv, void *data, void *slot) uint64_t hba_pkt_drops = 0; uint64_t hba_byte_drops = 0; uint16_t hba_pkt = 0; - uint32_t filter_id = 0; - uint32_t hash_id = 0; - uint32_t numa_node = 0; + int numa_node = -1; int set_cpu_affinity = 0; - + int closer = 0; + int is_inline = 0; + int is_autoconfig = 0; /* This just keeps the startup output more orderly. 
*/ usleep(200000 * ntv->stream_id); - if (NapatechIsAutoConfigEnabled()) { + if (ConfGetBool("napatech.inline", &is_inline) == 0) { + is_inline = 0; + } + if (ConfGetBool("napatech.auto-config", &is_autoconfig) == 0) { + is_autoconfig = 0; + } + + if (is_autoconfig) { numa_node = GetNumaNode(); switch (numa_node) { - case 0: SC_ATOMIC_ADD(numa0_count, 1); - break; - case 1: SC_ATOMIC_ADD(numa1_count, 1); - break; - case 2: SC_ATOMIC_ADD(numa2_count, 1); - break; - case 3: SC_ATOMIC_ADD(numa3_count, 1); - break; - default: break; + case 0: + SC_ATOMIC_ADD(numa0_count, 1); + break; + case 1: + SC_ATOMIC_ADD(numa1_count, 1); + break; + case 2: + SC_ATOMIC_ADD(numa2_count, 1); + break; + case 3: + SC_ATOMIC_ADD(numa3_count, 1); + break; + default: + break; } if (ConfGetBool("threading.set-cpu-affinity", &set_cpu_affinity) != 1) { @@ -304,37 +795,41 @@ TmEcode NapatechPacketLoopZC(ThreadVars *tv, void *data, void *slot) if (set_cpu_affinity) { NapatechSetupNuma(ntv->stream_id, numa_node); } - } - if (NapatechIsAutoConfigEnabled()) { numa_node = GetNumaNode(); SC_ATOMIC_ADD(stream_count, 1); if (SC_ATOMIC_GET(stream_count) == NapatechGetNumConfiguredStreams()) { - /* The last thread to run sets up the streams */ - status = NapatechSetupTraffic(NapatechGetNumFirstStream(), - NapatechGetNumLastStream(), - &filter_id, &hash_id); - if (filter_id == 0) { +#ifdef NAPATECH_ENABLE_BYPASS + /* Initialize the port map before we setup traffic filters */ + for (int i = 0; i < MAX_PORTS; ++i) { + inline_port_map[i] = -1; + } +#endif + /* The last thread to run sets up and deletes the streams */ + status = NapatechSetupTraffic(NapatechGetNumFirstStream(), + NapatechGetNumLastStream()); - if (status == 0x20002061) { - SCLogError(SC_ERR_NAPATECH_STREAMS_REGISTER_FAILED, - "Check host buffer configuration in ntservice.ini."); - RecommendNUMAConfig(SC_LOG_ERROR); + closer = 1; - } else if (filter_id == 0x20000008) { - SCLogError(SC_ERR_NAPATECH_STREAMS_REGISTER_FAILED, - "Check napatech.ports in the suricata config file."); - } + if (status == 0x20002061) { + SCLogError(SC_ERR_NAPATECH_STREAMS_REGISTER_FAILED, + "Check host buffer configuration in ntservice.ini."); + RecommendNUMAConfig(SC_LOG_ERROR); + exit(EXIT_FAILURE); + } else if (status == 0x20000008) { + SCLogError(SC_ERR_NAPATECH_STREAMS_REGISTER_FAILED, + "Check napatech.ports in the suricata config file."); exit(EXIT_FAILURE); } - - RecommendNUMAConfig(SC_LOG_INFO); + RecommendNUMAConfig(SC_LOG_PERF); + SCLogNotice("Napatech packet input engine started."); } - } + } // is_autoconfig - SCLogInfo("Napatech Packet Loop Started - cpu: %3d, cpu_numa: %3d stream: %3u ", + SCLogInfo( + "Napatech Packet Loop Started - cpu: %3d, cpu_numa: %3d stream: %3u ", sched_getcpu(), numa_node, ntv->stream_id); if (ntv->hba > 0) { @@ -352,13 +847,12 @@ TmEcode NapatechPacketLoopZC(ThreadVars *tv, void *data, void *slot) SCLogDebug("Opening NAPATECH Stream: %lu for processing", ntv->stream_id); if ((status = NT_NetRxOpen(&(ntv->rx_stream), "SuricataStream", - NT_NET_INTERFACE_PACKET, ntv->stream_id, ntv->hba)) != NT_SUCCESS) { + NT_NET_INTERFACE_PACKET, ntv->stream_id, ntv->hba)) != NT_SUCCESS) { NAPATECH_ERROR(SC_ERR_NAPATECH_OPEN_FAILED, status); SCFree(ntv); SCReturnInt(TM_ECODE_FAILED); } - TmSlot *s = (TmSlot *) slot; ntv->slot = s->slot_next; @@ -369,16 +863,21 @@ TmEcode NapatechPacketLoopZC(ThreadVars *tv, void *data, void *slot) /* Napatech returns packets 1 at a time */ status = NT_NetRxGet(ntv->rx_stream, &packet_buffer, 1000); - if (unlikely(status == 
NT_STATUS_TIMEOUT || status == NT_STATUS_TRYAGAIN)) { + if (unlikely( + status == NT_STATUS_TIMEOUT || status == NT_STATUS_TRYAGAIN)) { continue; } else if (unlikely(status != NT_SUCCESS)) { NAPATECH_ERROR(SC_ERR_NAPATECH_OPEN_FAILED, status); - SCLogInfo("Failed to read from Napatech Stream%d: %s", + SCLogInfo("Failed to read from Napatech Stream %d: %s", ntv->stream_id, error_buffer); - SCReturnInt(TM_ECODE_FAILED); + break; } Packet *p = PacketGetFromQueueOrAlloc(); +#ifdef NAPATECH_ENABLE_BYPASS + p->ntpv.bypass = 0; +#endif + if (unlikely(p == NULL)) { NT_NetRxRelease(ntv->rx_stream, packet_buffer); SCReturnInt(TM_ECODE_FAILED); @@ -402,14 +901,12 @@ TmEcode NapatechPacketLoopZC(ThreadVars *tv, void *data, void *slot) break; case NT_TIMESTAMP_TYPE_PCAP_NANOTIME: p->ts.tv_sec = pkt_ts >> 32; - p->ts.tv_usec = ( (pkt_ts & 0xFFFFFFFF) / 1000) - + (pkt_ts % 1000) > 500 ? 1 : 0; + p->ts.tv_usec = ((pkt_ts & 0xFFFFFFFF) / 1000) + (pkt_ts % 1000) > 500 ? 1 : 0; break; case NT_TIMESTAMP_TYPE_NATIVE_NDIS: /* number of seconds between 1/1/1601 and 1/1/1970 */ p->ts.tv_sec = (pkt_ts / 100000000) - 11644473600; - p->ts.tv_usec = ( (pkt_ts % 100000000) / 100) - + (pkt_ts % 100) > 50 ? 1 : 0; + p->ts.tv_usec = ((pkt_ts % 100000000) / 100) + (pkt_ts % 100) > 50 ? 1 : 0; break; default: SCLogError(SC_ERR_NAPATECH_TIMESTAMP_TYPE_NOT_SUPPORTED, @@ -425,8 +922,8 @@ TmEcode NapatechPacketLoopZC(ThreadVars *tv, void *data, void *slot) /* Update drop counter */ if (unlikely((status = NT_NetRxRead(ntv->rx_stream, &stat_cmd)) != NT_SUCCESS)) { NAPATECH_ERROR(SC_ERR_NAPATECH_OPEN_FAILED, status); - SCLogInfo("Couldn't retrieve drop statistics from the RX stream: %u - %s", - ntv->stream_id, error_buffer); + SCLogInfo("Couldn't retrieve drop statistics from the RX stream: %u", + ntv->stream_id); } else { hba_pkt_drops = stat_cmd.u.streamDrop.pktsDropped; @@ -435,15 +932,17 @@ TmEcode NapatechPacketLoopZC(ThreadVars *tv, void *data, void *slot) StatsSyncCountersIfSignalled(tv); } +#ifdef NAPATECH_ENABLE_BYPASS + p->ntpv.dyn3 = _NT_NET_GET_PKT_DESCR_PTR_DYN3(packet_buffer); + p->BypassPacketsFlow = (NapatechIsBypassSupported() ? 
NapatechBypassCallback : NULL); + NT_NET_SET_PKT_TXPORT(packet_buffer, inline_port_map[p->ntpv.dyn3->rxPort]); +#endif p->ReleasePacket = NapatechReleasePacket; p->ntpv.nt_packet_buf = packet_buffer; p->ntpv.stream_id = ntv->stream_id; p->datalink = LINKTYPE_ETHERNET; - if (unlikely(PacketSetData(p, - (uint8_t *) NT_NET_GET_PKT_L2_PTR(packet_buffer), - NT_NET_GET_PKT_WIRE_LENGTH(packet_buffer)))) { - + if (unlikely(PacketSetData(p, (uint8_t *)NT_NET_GET_PKT_L2_PTR(packet_buffer), NT_NET_GET_PKT_WIRE_LENGTH(packet_buffer)))) { TmqhOutputPacketpool(ntv->tv, p); NT_NetRxRelease(ntv->rx_stream, packet_buffer); SCReturnInt(TM_ECODE_FAILED); @@ -458,23 +957,31 @@ TmEcode NapatechPacketLoopZC(ThreadVars *tv, void *data, void *slot) /* Release any packets that were returned by the callback function */ Packet *rel_pkt = PacketDequeue(&packets_to_release[ntv->stream_id]); while (rel_pkt != NULL) { +#ifdef NAPATECH_ENABLE_BYPASS + if (rel_pkt->ntpv.bypass == 1) { + if (PACKET_TEST_ACTION(p, ACTION_DROP)) { + if (is_inline) { + rel_pkt->ntpv.dyn3->wireLength = 0; + } + } + ProgramFlow(rel_pkt, is_inline); + } +#endif NT_NetRxRelease(ntv->rx_stream, rel_pkt->ntpv.nt_packet_buf); rel_pkt = PacketDequeue(&packets_to_release[ntv->stream_id]); } StatsSyncCountersIfSignalled(tv); - } - - if (filter_id) { - NapatechDeleteFilter(filter_id); - } + } // while - if (hash_id) { - NapatechDeleteFilter(hash_id); + if (closer) { +#ifdef NAPATECH_ENABLE_BYPASS + NapatechCloseFlowStreams(); +#endif + NapatechDeleteFilters(); } if (unlikely(ntv->hba > 0)) { - SCLogInfo("Host Buffer Allowance Drops - pkts: %ld, bytes: %ld", - hba_pkt_drops, hba_byte_drops); + SCLogInfo("Host Buffer Allowance Drops - pkts: %ld, bytes: %ld", hba_pkt_drops, hba_byte_drops); } SCReturnInt(TM_ECODE_OK); @@ -492,26 +999,34 @@ void NapatechStreamThreadExitStats(ThreadVars *tv, void *data) NapatechCurrentStats stat = NapatechGetCurrentStats(ntv->stream_id); double percent = 0; - if (stat.current_drops > 0) - percent = (((double) stat.current_drops) - / (stat.current_packets + stat.current_drops)) * 100; + if (stat.current_drop_packets > 0) + percent = (((double) stat.current_drop_packets) + / (stat.current_packets + stat.current_drop_packets)) * 100; SCLogInfo("nt%lu - pkts: %lu; drop: %lu (%5.2f%%); bytes: %lu", - (uint64_t) ntv->stream_id, stat.current_packets, - stat.current_drops, percent, stat.current_bytes); + (uint64_t) ntv->stream_id, stat.current_packets, + stat.current_drop_packets, percent, stat.current_bytes); SC_ATOMIC_ADD(total_packets, stat.current_packets); - SC_ATOMIC_ADD(total_drops, stat.current_drops); + SC_ATOMIC_ADD(total_drops, stat.current_drop_packets); SC_ATOMIC_ADD(total_tallied, 1); if (SC_ATOMIC_GET(total_tallied) == NapatechGetNumConfiguredStreams()) { if (SC_ATOMIC_GET(total_drops) > 0) percent = (((double) SC_ATOMIC_GET(total_drops)) / (SC_ATOMIC_GET(total_packets) - + SC_ATOMIC_GET(total_drops))) * 100; + + SC_ATOMIC_GET(total_drops))) * 100; SCLogInfo(" "); SCLogInfo("--- Total Packets: %ld Total Dropped: %ld (%5.2f%%)", SC_ATOMIC_GET(total_packets), SC_ATOMIC_GET(total_drops), percent); + +#ifdef NAPATECH_ENABLE_BYPASS + SCLogInfo("--- BypassCB - Total: %ld, UDP: %ld, TCP: %ld, Unhandled: %ld", + SC_ATOMIC_GET(flow_callback_cnt), + SC_ATOMIC_GET(flow_callback_udp_pkts), + SC_ATOMIC_GET(flow_callback_tcp_pkts), + SC_ATOMIC_GET(flow_callback_unhandled_pkts)); +#endif } } @@ -524,13 +1039,13 @@ TmEcode NapatechStreamThreadDeinit(ThreadVars *tv, void *data) { SCEnter(); NapatechThreadVars *ntv = 
(NapatechThreadVars *) data; + SCLogDebug("Closing Napatech Stream: %d", ntv->stream_id); NT_NetRxClose(ntv->rx_stream); + SCReturnInt(TM_ECODE_OK); } -/** Decode Napatech */ - /** * \brief This function passes off to link type decoders. * @@ -563,7 +1078,7 @@ TmEcode NapatechDecode(ThreadVars *tv, Packet *p, void *data, PacketQueue *pq, break; default: SCLogError(SC_ERR_DATALINK_UNIMPLEMENTED, - "Error: datalink type %" PRId32 " not yet supported in module NapatechDecode", + "Datalink type %" PRId32 " not yet supported in module NapatechDecode", p->datalink); break; } @@ -572,23 +1087,38 @@ TmEcode NapatechDecode(ThreadVars *tv, Packet *p, void *data, PacketQueue *pq, SCReturnInt(TM_ECODE_OK); } +/** + * \brief Initialization of Napatech Thread. + * + * \param tv pointer to ThreadVars + * \param initdata - unused. + * \param data pointer that gets cast into DecoderThreadVars + */ TmEcode NapatechDecodeThreadInit(ThreadVars *tv, const void *initdata, void **data) { SCEnter(); DecodeThreadVars *dtv = NULL; dtv = DecodeThreadVarsAlloc(tv); - if (dtv == NULL) + if (dtv == NULL) { SCReturnInt(TM_ECODE_FAILED); + } DecodeRegisterPerfCounters(dtv, tv); *data = (void *) dtv; SCReturnInt(TM_ECODE_OK); } +/** + * \brief Deinitialization of Napatech Thread. + * + * \param tv pointer to ThreadVars + * \param data pointer that gets cast into DecoderThreadVars + */ TmEcode NapatechDecodeThreadDeinit(ThreadVars *tv, void *data) { - if (data != NULL) + if (data != NULL) { DecodeThreadVarsFree(tv, data); + } SCReturnInt(TM_ECODE_OK); } diff --git a/src/source-napatech.h b/src/source-napatech.h index 72ce9afaee2b..2e3fb9462fb7 100644 --- a/src/source-napatech.h +++ b/src/source-napatech.h @@ -31,10 +31,14 @@ void TmModuleNapatechDecodeRegister(void); #ifdef HAVE_NAPATECH #include -struct NapatechStreamDevConf { +struct NapatechStreamDevConf +{ uint16_t stream_id; intmax_t hba; }; +int NapatechSetPortmap(int port, int peer); +int NapatechGetAdapter(uint8_t port); + #endif /* HAVE_NAPATECH */ #endif /* __SOURCE_NAPATECH_H__ */ diff --git a/src/util-napatech.c b/src/util-napatech.c index 2d96f8dc8b4a..a8902fe2e30f 100644 --- a/src/util-napatech.c +++ b/src/util-napatech.c @@ -14,7 +14,6 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. */ - /** * \file * @@ -23,28 +22,200 @@ * * */ - #include "suricata-common.h" + #ifdef HAVE_NAPATECH #include "suricata.h" #include "util-device.h" #include "util-cpu.h" +#include "util-byte.h" #include "threadvars.h" #include "tm-threads.h" +#include "util-napatech.h" +#include "source-napatech.h" + +#ifdef NAPATECH_ENABLE_BYPASS + +/** + * \brief Statistics counters for flows programmed for bypass on + * the adapter. + */ +typedef struct FlowStatsCounters_ +{ + uint16_t active_bypass_flows; + uint16_t total_bypass_flows; +} FlowStatsCounters; + +static NtFlowStream_t hFlowStream[MAX_ADAPTERS]; + +static int bypass_supported; +int NapatechIsBypassSupported(void) +{ + return bypass_supported; +} + +/** + * \brief Returns the number of Napatech Adapters in the system. + * + * \return count of the Napatech adapters present in the system.
+ */ +static int GetNumAdapters(void) +{ + NtInfoStream_t hInfo; + NtInfo_t hInfoSys; + int status; + + if ((status = NT_InfoOpen(&hInfo, "InfoStream")) != NT_SUCCESS) { + NAPATECH_ERROR(SC_ERR_NAPATECH_OPEN_FAILED, status); + exit(EXIT_FAILURE); + } + + hInfoSys.cmd = NT_INFO_CMD_READ_SYSTEM; + if ((status = NT_InfoRead(hInfo, &hInfoSys)) != NT_SUCCESS) { + NAPATECH_ERROR(SC_ERR_NAPATECH_OPEN_FAILED, status); + exit(EXIT_FAILURE); + } + + int num_adapters = hInfoSys.u.system.data.numAdapters; + + NT_InfoClose(hInfo); + return num_adapters; +} + +/** + * \brief Initializes the FlowStreams used to program flow data. + * + * Opens a FlowStream on each adapter present in the system. This + * FlowStream is subsequently used to program the adapter with + * flows to bypass. + * + * \return 1 if Bypass functionality is supported; zero otherwise. + */ +int NapatechInitFlowStreams(void) +{ + int status; + int adapter = 0; + int num_adapters = GetNumAdapters(); + SCLogInfo("Found %d Napatech adapters.\n", num_adapters); + memset(&hFlowStream, 0, sizeof(hFlowStream)); + + if (!NapatechUseHWBypass()) { + /* HW Bypass is disabled in the conf file */ + return 0; + } + + for (adapter = 0; adapter < num_adapters; ++adapter) { + NtFlowAttr_t attr; + char flow_name[80]; + + NT_FlowOpenAttrInit(&attr); + NT_FlowOpenAttrSetAdapterNo(&attr, adapter); + + snprintf(flow_name, sizeof(flow_name), "Flow stream %d", adapter ); + SCLogInfo("Opening flow programming stream: %s\n", flow_name); + if ((status = NT_FlowOpen_Attr(&hFlowStream[adapter], flow_name, &attr)) != NT_SUCCESS) { + SCLogWarning(SC_WARN_COMPATIBILITY, "Napatech bypass functionality not supported by the FPGA version on adapter %d - disabling support.", adapter); + bypass_supported = 0; + return 0; + } + } + + bypass_supported = 1; + return bypass_supported; +} + +/** + * \brief Returns a pointer to the FlowStream associated with this adapter. + * + * \return count of the Napatech adapters present in the system. + */ +NtFlowStream_t *NapatechGetFlowStreamPtr(int device) +{ + return &hFlowStream[device]; +} + +/** + * \brief Closes all open FlowStreams + * + * \return Success of the operation. + */ +int NapatechCloseFlowStreams(void) +{ + int status = 0; + int adapter = 0; + int num_adapters = GetNumAdapters(); + + for (adapter = 0; adapter < num_adapters; ++adapter) { + if (hFlowStream[adapter]) { + SCLogInfo("Closing Napatech Flow Stream on adapter %d.", adapter); + if ((status = NT_FlowClose(hFlowStream[adapter])) != NT_SUCCESS) { + NAPATECH_ERROR(SC_ERR_SHUTDOWN, status); + } + hFlowStream[adapter] = NULL; + } + } + return (status == NT_SUCCESS); +} + + +/** + * \brief Updates statistic counters for Napatech FlowStats + * + * \param tv Thread variable to ThreadVars + * \param hInfo Handle to the Napatech InfoStream. + * \param hstat_stream Handle to the Napatech Statistics Stream. + * \param flow_counters The flow counters statistics to update. + * \param clear_stats Indicates if statistics on the card should be reset to zero. 
+ * + */ +static void UpdateFlowStats( + ThreadVars *tv, + NtInfoStream_t hInfo, + NtStatStream_t hstat_stream, + FlowStatsCounters flow_counters, + int clear_stats + ) +{ + NtStatistics_t hStat; + int status; + + uint64_t programed = 0; + uint64_t removed = 0; + int adapter = 0; + + for (adapter = 0; adapter < GetNumAdapters(); ++adapter) { + hStat.cmd = NT_STATISTICS_READ_CMD_FLOW_V0; + hStat.u.flowData_v0.clear = clear_stats; + hStat.u.flowData_v0.adapterNo = adapter; + if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) { + NAPATECH_ERROR(SC_ERR_NAPATECH_INIT_FAILED, status); + exit(1); + } + programed = hStat.u.flowData_v0.learnDone; + removed = hStat.u.flowData_v0.unlearnDone + + hStat.u.flowData_v0.automaticUnlearnDone + + hStat.u.flowData_v0.timeoutUnlearnDone; + } + + StatsSetUI64(tv, flow_counters.active_bypass_flows, programed - removed); + StatsSetUI64(tv, flow_counters.total_bypass_flows, programed); +} + +#endif /* NAPATECH_ENABLE_BYPASS */ + /*----------------------------------------------------------------------------- *----------------------------------------------------------------------------- * Statistics code *----------------------------------------------------------------------------- */ - -typedef struct StreamCounters_ { +typedef struct PacketCounters_ +{ uint16_t pkts; uint16_t byte; - uint16_t drop; -} StreamCounters; - + uint16_t drop_pkts; + uint16_t drop_byte; +} PacketCounters; +NapatechCurrentStats total_stats; NapatechCurrentStats current_stats[MAX_STREAMS]; NapatechCurrentStats NapatechGetCurrentStats(uint16_t id) @@ -61,18 +232,25 @@ enum CONFIG_SPECIFIER { #define MAX_HOSTBUFFERS 8 +/** + * \brief Test to see if any of the configured streams are active + * + * \param hInfo Handle to Napatech Info Stream. + * \param hStatsStream Handle to Napatech Statistics stream + * \param stream_config array of stream configuration structures + * \param num_inst + * + */ static uint16_t TestStreamConfig( NtInfoStream_t hInfo, - NtStatStream_t hStatStream, + NtStatStream_t hstat_stream, NapatechStreamConfig stream_config[], uint16_t num_inst) { - uint16_t num_active = 0; for (uint16_t inst = 0; inst < num_inst; ++inst) { int status; - char buffer[80]; // Error buffer NtStatistics_t stat; // Stat handle. /* Check to see if it is an active stream */ @@ -82,11 +260,8 @@ static uint16_t TestStreamConfig( stat.cmd = NT_STATISTICS_READ_CMD_USAGE_DATA_V0; stat.u.usageData_v0.streamid = (uint8_t) stream_config[inst].stream_id; - if ((status = NT_StatRead(hStatStream, &stat)) != NT_SUCCESS) { - /* Get the status code as text */ - NT_ExplainError(status, buffer, sizeof (buffer)); - SCLogError(SC_ERR_NAPATECH_INIT_FAILED, - "NT_StatRead():2 failed: %s\n", buffer); + if ((status = NT_StatRead(hstat_stream, &stat)) != NT_SUCCESS) { + NAPATECH_ERROR(SC_ERR_NAPATECH_INIT_FAILED, status); return 0; } @@ -101,35 +276,57 @@ static uint16_t TestStreamConfig( return num_active; } +/** + * \brief Updates Napatech packet counters + * + * \param tv Pointer to TheardVars structure + * \param hInfo Handle to Napatech Info Stream. + * \param hstat_stream Handle to Napatech Statistics stream + * \param num_streams the number of streams that are currently active + * \param stream_config array of stream configuration structures + * \param total_counters - cumulative count of all packets received. 
+ * \param dispatch_host, - Count of packets that were delivered to the host buffer + * \param dispatch_drop - count of packets that were dropped as a result of a rule + * \param dispatch_fwd - count of packets forwarded out the egress port as the result of a rule + * \param is_inline - are we running in inline mode? + * \param enable_stream_stats - are per thread/stream statistics enabled. + * \param stream_counters - counters for each thread/stream configured. + * + * \return The number of active streams that were updated. + * + */ static uint32_t UpdateStreamStats(ThreadVars *tv, NtInfoStream_t hInfo, - NtStatStream_t hStatStream, + NtStatStream_t hstat_stream, uint16_t num_streams, NapatechStreamConfig stream_config[], - StreamCounters streamCounters[] + PacketCounters total_counters, + PacketCounters dispatch_host, + PacketCounters dispatch_drop, + PacketCounters dispatch_fwd, + int is_inline, + int enable_stream_stats, + PacketCounters stream_counters[] ) { static uint64_t rxPktsStart[MAX_STREAMS] = {0}; static uint64_t rxByteStart[MAX_STREAMS] = {0}; - static uint64_t dropStart[MAX_STREAMS] = {0}; + static uint64_t dropPktStart[MAX_STREAMS] = {0}; + static uint64_t dropByteStart[MAX_STREAMS] = {0}; int status; - char error_buffer[80]; // Error buffer NtInfo_t hStreamInfo; NtStatistics_t hStat; // Stat handle. /* Query the system to get the number of streams currently instantiated */ hStreamInfo.cmd = NT_INFO_CMD_READ_STREAM; if ((status = NT_InfoRead(hInfo, &hStreamInfo)) != NT_SUCCESS) { - NT_ExplainError(status, error_buffer, sizeof (error_buffer) - 1); - SCLogError(SC_ERR_NAPATECH_INIT_FAILED, - "NT_InfoRead() failed: %s\n", error_buffer); + NAPATECH_ERROR(SC_ERR_NAPATECH_INIT_FAILED, status); exit(EXIT_FAILURE); } uint16_t num_active; - if ((num_active = TestStreamConfig(hInfo, hStatStream, - stream_config, num_streams)) == 0) { + if ((num_active = TestStreamConfig(hInfo, hstat_stream, stream_config, num_streams)) == 0) { /* None of the configured streams are active */ return 0; } @@ -138,8 +335,6 @@ static uint32_t UpdateStreamStats(ThreadVars *tv, uint16_t inst_id = 0; uint32_t stream_cnt = 0; for (stream_cnt = 0; stream_cnt < num_streams; ++stream_cnt) { - - while (inst_id < num_streams) { if (stream_config[inst_id].is_active) { break; @@ -155,114 +350,327 @@ static uint32_t UpdateStreamStats(ThreadVars *tv, hStat.cmd = NT_STATISTICS_READ_CMD_USAGE_DATA_V0; hStat.u.usageData_v0.streamid = (uint8_t) stream_config[inst_id].stream_id; - if ((status = NT_StatRead(hStatStream, &hStat)) != NT_SUCCESS) { - /* Get the status code as text */ - NT_ExplainError(status, error_buffer, sizeof (error_buffer)); - SCLogError(SC_ERR_NAPATECH_INIT_FAILED, - "NT_StatRead() failed: %s\n", error_buffer); + if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) { + NAPATECH_ERROR(SC_ERR_NAPATECH_INIT_FAILED, status); return 0; } uint16_t stream_id = stream_config[inst_id].stream_id; if (stream_config[inst_id].is_active) { - uint64_t rxPktsTotal = 0; - uint64_t rxByteTotal = 0; - uint64_t dropTotal = 0; + uint64_t rx_pkts_total = 0; + uint64_t rx_byte_total = 0; + uint64_t drop_pkts_total = 0; + uint64_t drop_byte_total = 0; for (uint32_t hbCount = 0; hbCount < hStat.u.usageData_v0.data.numHostBufferUsed; hbCount++) { if (unlikely(stream_config[inst_id].initialized == false)) { rxPktsStart[stream_id] += hStat.u.usageData_v0.data.hb[hbCount].stat.rx.frames; rxByteStart[stream_id] += hStat.u.usageData_v0.data.hb[hbCount].stat.rx.bytes; - dropStart[stream_id] += 
hStat.u.usageData_v0.data.hb[hbCount].stat.drop.frames; + dropPktStart[stream_id] += hStat.u.usageData_v0.data.hb[hbCount].stat.drop.frames; + dropByteStart[stream_id] += hStat.u.usageData_v0.data.hb[hbCount].stat.drop.bytes; stream_config[inst_id].initialized = true; } else { - rxPktsTotal += hStat.u.usageData_v0.data.hb[hbCount].stat.rx.frames; - rxByteTotal += hStat.u.usageData_v0.data.hb[hbCount].stat.rx.bytes; - dropTotal += hStat.u.usageData_v0.data.hb[hbCount].stat.drop.frames; + rx_pkts_total += hStat.u.usageData_v0.data.hb[hbCount].stat.rx.frames; + rx_byte_total += hStat.u.usageData_v0.data.hb[hbCount].stat.rx.bytes; + drop_pkts_total += hStat.u.usageData_v0.data.hb[hbCount].stat.drop.frames; + drop_byte_total += hStat.u.usageData_v0.data.hb[hbCount].stat.drop.bytes; } } - current_stats[stream_id].current_packets = rxPktsTotal - rxPktsStart[stream_id]; - current_stats[stream_id].current_bytes = rxByteTotal - rxByteStart[stream_id]; - current_stats[stream_id].current_drops = dropTotal - dropStart[stream_id]; + current_stats[stream_id].current_packets = rx_pkts_total - rxPktsStart[stream_id]; + current_stats[stream_id].current_bytes = rx_byte_total - rxByteStart[stream_id]; + current_stats[stream_id].current_drop_packets = drop_pkts_total - dropPktStart[stream_id]; + current_stats[stream_id].current_drop_bytes = drop_byte_total - dropByteStart[stream_id]; + } + + if (enable_stream_stats) { + StatsSetUI64(tv, stream_counters[inst_id].pkts, current_stats[stream_id].current_packets); + StatsSetUI64(tv, stream_counters[inst_id].byte, current_stats[stream_id].current_bytes); + StatsSetUI64(tv, stream_counters[inst_id].drop_pkts, current_stats[stream_id].current_drop_packets); + StatsSetUI64(tv, stream_counters[inst_id].drop_byte, current_stats[stream_id].current_drop_bytes); } - StatsSetUI64(tv, streamCounters[inst_id].pkts, current_stats[stream_id].current_packets); - StatsSetUI64(tv, streamCounters[inst_id].byte, current_stats[stream_id].current_bytes); - StatsSetUI64(tv, streamCounters[inst_id].drop, current_stats[stream_id].current_drops); - ++inst_id; } + + uint32_t stream_id; + for (stream_id = 0; stream_id < num_streams; ++stream_id) { + +#ifndef NAPATECH_ENABLE_BYPASS + total_stats.current_packets += current_stats[stream_id].current_packets; + total_stats.current_bytes += current_stats[stream_id].current_bytes; +#endif /* NAPATECH_ENABLE_BYPASS */ + total_stats.current_drop_packets += current_stats[stream_id].current_drop_packets; + total_stats.current_drop_bytes += current_stats[stream_id].current_drop_bytes; + } + + +#ifndef NAPATECH_ENABLE_BYPASS + StatsSetUI64(tv, total_counters.pkts, total_stats.current_packets); + StatsSetUI64(tv, total_counters.byte, total_stats.current_bytes); +#endif /* NAPATECH_ENABLE_BYPASS */ + + StatsSetUI64(tv, total_counters.drop_pkts, total_stats.current_drop_packets); + StatsSetUI64(tv, total_counters.drop_byte, total_stats.current_drop_bytes); + + total_stats.current_packets = 0; + total_stats.current_bytes = 0; + total_stats.current_drop_packets = 0; + total_stats.current_drop_bytes = 0; + + + /* Read usage data for the chosen stream ID */ + memset(&hStat, 0, sizeof (NtStatistics_t)); + +#ifdef NAPATECH_ENABLE_BYPASS + hStat.cmd = NT_STATISTICS_READ_CMD_QUERY_V3; + hStat.u.query_v3.clear = 0; +#else /* NAPATECH_ENABLE_BYPASS */ + /* Older versions of the API have a different structure. 
*/ + hStat.cmd = NT_STATISTICS_READ_CMD_QUERY_V2; + hStat.u.query_v2.clear = 0; +#endif /* !NAPATECH_ENABLE_BYPASS */ + + if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) { + if (status == NT_STATUS_TIMEOUT) { + SCLogInfo("Statistics timed out - will retry next time."); + return 0; + } else { + NAPATECH_ERROR(SC_ERR_NAPATECH_INIT_FAILED, status); + return 0; + } + } + +#ifdef NAPATECH_ENABLE_BYPASS + + int adapter = 0; + uint64_t total_dispatch_host_pkts = 0; + uint64_t total_dispatch_host_byte = 0; + uint64_t total_dispatch_drop_pkts = 0; + uint64_t total_dispatch_drop_byte = 0; + uint64_t total_dispatch_fwd_pkts = 0; + uint64_t total_dispatch_fwd_byte = 0; + + for (adapter = 0; adapter < GetNumAdapters(); ++adapter) { + total_dispatch_host_pkts += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[0].pkts; + total_dispatch_host_byte += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[0].octets; + + total_dispatch_drop_pkts += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[1].pkts + + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[3].pkts; + total_dispatch_drop_byte += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[1].octets + + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[3].octets; + + total_dispatch_fwd_pkts += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[2].pkts + + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[4].pkts; + total_dispatch_fwd_byte += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[2].octets + + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[4].octets; + + total_stats.current_packets += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[0].pkts + + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[1].pkts + + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[2].pkts + + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[3].pkts; + + total_stats.current_bytes = hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[0].octets + + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[1].octets + + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[2].octets; + } + + StatsSetUI64(tv, dispatch_host.pkts, total_dispatch_host_pkts); + StatsSetUI64(tv, dispatch_host.byte, total_dispatch_host_byte); + + StatsSetUI64(tv, dispatch_drop.pkts, total_dispatch_drop_pkts); + StatsSetUI64(tv, dispatch_drop.byte, total_dispatch_drop_byte); + + if (is_inline) { + StatsSetUI64(tv, dispatch_fwd.pkts, total_dispatch_fwd_pkts); + StatsSetUI64(tv, dispatch_fwd.byte, total_dispatch_fwd_byte); + } + + StatsSetUI64(tv, total_counters.pkts, total_stats.current_packets); + StatsSetUI64(tv, total_counters.byte, total_stats.current_bytes); + +#endif /* NAPATECH_ENABLE_BYPASS */ + return num_active; } +/** + * \brief Statistics processing loop + * + * Instantiated on the stats thread. 
Periodically retrieves
+ * statistics from the Napatech card and updates the packet counters
+ *
+ * \param arg  Pointer that is cast into a ThreadVars structure
+ */
 static void *NapatechStatsLoop(void *arg)
 {
     ThreadVars *tv = (ThreadVars *) arg;
     int status;
-    char error_buffer[80]; // Error buffer
     NtInfoStream_t hInfo;
-    NtStatStream_t hStatStream;
+    NtStatStream_t hstat_stream;
+    int is_inline = 0;
+    int enable_stream_stats = 0;
+    PacketCounters stream_counters[MAX_STREAMS];
+
+    if (ConfGetBool("napatech.inline", &is_inline) == 0) {
+        is_inline = 0;
+    }
+
+    if (ConfGetBool("napatech.enable-stream-stats", &enable_stream_stats) == 0) {
+        /* default is "no" */
+        enable_stream_stats = 0;
+    }
 
     NapatechStreamConfig stream_config[MAX_STREAMS];
     uint16_t stream_cnt = NapatechGetStreamConfig(stream_config);
 
     /* Open the info and Statistics */
     if ((status = NT_InfoOpen(&hInfo, "StatsLoopInfoStream")) != NT_SUCCESS) {
-        NT_ExplainError(status, error_buffer, sizeof (error_buffer) - 1);
-        SCLogError(SC_ERR_RUNMODE, "NT_InfoOpen() failed: %s\n", error_buffer);
+        NAPATECH_ERROR(SC_ERR_NAPATECH_INIT_FAILED, status);
         return NULL;
     }
 
-    if ((status = NT_StatOpen(&hStatStream, "StatsLoopStatsStream")) != NT_SUCCESS) {
-        /* Get the status code as text */
-        NT_ExplainError(status, error_buffer, sizeof (error_buffer));
-        SCLogError(SC_ERR_RUNMODE, "NT_StatOpen() failed: %s\n", error_buffer);
+    if ((status = NT_StatOpen(&hstat_stream, "StatsLoopStatsStream")) != NT_SUCCESS) {
+        NAPATECH_ERROR(SC_ERR_NAPATECH_INIT_FAILED, status);
         return NULL;
     }
 
-    StreamCounters streamCounters[MAX_STREAMS];
-    for (int i = 0; i < stream_cnt; ++i) {
-        char *pkts_buf = SCCalloc(1, 32);
-        if (unlikely(pkts_buf == NULL)) {
-            SCLogError(SC_ERR_MEM_ALLOC,
-                    "Failed to allocate memory for NAPATECH stream counter.");
-            exit(EXIT_FAILURE);
-        }
+    NtStatistics_t hStat;
+    memset(&hStat, 0, sizeof (NtStatistics_t));
 
-        snprintf(pkts_buf, 32, "nt%d.pkts", stream_config[i].stream_id);
-        streamCounters[i].pkts = StatsRegisterCounter(pkts_buf, tv);
+#ifdef NAPATECH_ENABLE_BYPASS
+    hStat.cmd = NT_STATISTICS_READ_CMD_QUERY_V3;
+    hStat.u.query_v3.clear = 1;
+#else /* NAPATECH_ENABLE_BYPASS */
+    hStat.cmd = NT_STATISTICS_READ_CMD_QUERY_V2;
+    hStat.u.query_v2.clear = 1;
+#endif /* !NAPATECH_ENABLE_BYPASS */
 
-        char *byte_buf = SCCalloc(1, 32);
-        if (unlikely(byte_buf == NULL)) {
-            SCLogError(SC_ERR_MEM_ALLOC,
-                    "Failed to allocate memory for NAPATECH stream counter.");
-            exit(EXIT_FAILURE);
-        }
-        snprintf(byte_buf, 32, "nt%d.bytes", stream_config[i].stream_id);
-        streamCounters[i].byte = StatsRegisterCounter(byte_buf, tv);
+    if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
+        NAPATECH_ERROR(SC_ERR_NAPATECH_INIT_FAILED, status);
+        return 0;
+    }
 
-        char *drop_buf = SCCalloc(1, 32);
-        if (unlikely(drop_buf == NULL)) {
-            SCLogError(SC_ERR_MEM_ALLOC,
-                    "Failed to allocate memory for NAPATECH stream counter.");
-            exit(EXIT_FAILURE);
+    PacketCounters total_counters;
+    memset(&total_counters, 0, sizeof(total_counters));
+
+    PacketCounters dispatch_host;
+    memset(&dispatch_host, 0, sizeof(dispatch_host));
+
+    PacketCounters dispatch_drop;
+    memset(&dispatch_drop, 0, sizeof(dispatch_drop));
+
+    PacketCounters dispatch_fwd;
+    memset(&dispatch_fwd, 0, sizeof(dispatch_fwd));
+
+    total_counters.pkts = StatsRegisterCounter("napa_total.pkts", tv);
+    dispatch_host.pkts = StatsRegisterCounter("napa_dispatch_host.pkts", tv);
+    dispatch_drop.pkts = StatsRegisterCounter("napa_dispatch_drop.pkts", tv);
+    if (is_inline) {
+        dispatch_fwd.pkts =
StatsRegisterCounter("napa_dispatch_fwd.pkts", tv); + } + + total_counters.byte = StatsRegisterCounter("napa_total.byte", tv); + dispatch_host.byte = StatsRegisterCounter("napa_dispatch_host.byte", tv); + dispatch_drop.byte = StatsRegisterCounter("napa_dispatch_drop.byte", tv); + if (is_inline) { + dispatch_fwd.byte = StatsRegisterCounter("napa_dispatch_fwd.byte", tv); + } + + total_counters.drop_pkts = StatsRegisterCounter("napa_total.overflow_drop_pkts", tv); + total_counters.drop_byte = StatsRegisterCounter("napa_total.overflow_drop_byte", tv); + + if (enable_stream_stats) { + for (int i = 0; i < stream_cnt; ++i) { + char *pkts_buf = SCCalloc(1, 32); + if (unlikely(pkts_buf == NULL)) { + SCLogError(SC_ERR_MEM_ALLOC, + "Failed to allocate memory for NAPATECH stream counter."); + exit(EXIT_FAILURE); + } + + snprintf(pkts_buf, 32, "napa%d.pkts", stream_config[i].stream_id); + stream_counters[i].pkts = StatsRegisterCounter(pkts_buf, tv); + + char *byte_buf = SCCalloc(1, 32); + if (unlikely(byte_buf == NULL)) { + SCLogError(SC_ERR_MEM_ALLOC, + "Failed to allocate memory for NAPATECH stream counter."); + exit(EXIT_FAILURE); + } + snprintf(byte_buf, 32, "napa%d.bytes", stream_config[i].stream_id); + stream_counters[i].byte = StatsRegisterCounter(byte_buf, tv); + + char *drop_pkts_buf = SCCalloc(1, 32); + if (unlikely(drop_pkts_buf == NULL)) { + SCLogError(SC_ERR_MEM_ALLOC, + "Failed to allocate memory for NAPATECH stream counter."); + exit(EXIT_FAILURE); + } + snprintf(drop_pkts_buf, 32, "napa%d.drop_pkts", stream_config[i].stream_id); + stream_counters[i].drop_pkts = StatsRegisterCounter(drop_pkts_buf, tv); + + char *drop_byte_buf = SCCalloc(1, 32); + if (unlikely(drop_byte_buf == NULL)) { + SCLogError(SC_ERR_MEM_ALLOC, + "Failed to allocate memory for NAPATECH stream counter."); + exit(EXIT_FAILURE); + } + snprintf(drop_byte_buf, 32, "napa%d.drop_byte", stream_config[i].stream_id); + stream_counters[i].drop_byte = StatsRegisterCounter(drop_byte_buf, tv); } - snprintf(drop_buf, 32, "nt%d.drop", stream_config[i].stream_id); - streamCounters[i].drop = StatsRegisterCounter(drop_buf, tv); } +#ifdef NAPATECH_ENABLE_BYPASS + FlowStatsCounters flow_counters; + if (bypass_supported) { + flow_counters.active_bypass_flows = StatsRegisterCounter("napa_bypass.active_flows", tv); + flow_counters.total_bypass_flows = StatsRegisterCounter("napa_bypass.total_flows", tv); + } +#endif /* NAPATECH_ENABLE_BYPASS */ + StatsSetupPrivate(tv); - for (int i = 0; i < stream_cnt; ++i) { - StatsSetUI64(tv, streamCounters[i].pkts, 0); - StatsSetUI64(tv, streamCounters[i].byte, 0); - StatsSetUI64(tv, streamCounters[i].drop, 0); + StatsSetUI64(tv, total_counters.pkts, 0); + StatsSetUI64(tv, total_counters.byte, 0); + StatsSetUI64(tv, total_counters.drop_pkts, 0); + StatsSetUI64(tv, total_counters.drop_byte, 0); + +#ifdef NAPATECH_ENABLE_BYPASS + if (bypass_supported) { + StatsSetUI64(tv, dispatch_host.pkts, 0); + StatsSetUI64(tv, dispatch_drop.pkts, 0); + + if (is_inline) { + StatsSetUI64(tv, dispatch_fwd.pkts, 0); + } + + StatsSetUI64(tv, dispatch_host.byte, 0); + StatsSetUI64(tv, dispatch_drop.byte, 0); + if (is_inline) { + StatsSetUI64(tv, dispatch_fwd.byte, 0); + } + + if (enable_stream_stats) { + for (int i = 0; i < stream_cnt; ++i) { + StatsSetUI64(tv, stream_counters[i].pkts, 0); + StatsSetUI64(tv, stream_counters[i].byte, 0); + StatsSetUI64(tv, stream_counters[i].drop_pkts, 0); + StatsSetUI64(tv, stream_counters[i].drop_byte, 0); + } + } + + StatsSetUI64(tv, flow_counters.active_bypass_flows, 0); + StatsSetUI64(tv, 
flow_counters.total_bypass_flows, 0); + UpdateFlowStats(tv, hInfo, hstat_stream, flow_counters, 1); } +#endif /* NAPATECH_ENABLE_BYPASS */ - uint32_t num_active = UpdateStreamStats(tv, hInfo, hStatStream, - stream_cnt, stream_config, streamCounters); + uint32_t num_active = UpdateStreamStats(tv, hInfo, hstat_stream, + stream_cnt, stream_config, total_counters, + dispatch_host, dispatch_drop, dispatch_fwd, + is_inline, enable_stream_stats, stream_counters); if (!NapatechIsAutoConfigEnabled() && (num_active < stream_cnt)) { SCLogInfo("num_active: %d, stream_cnt: %d", num_active, stream_cnt); @@ -277,8 +685,17 @@ static void *NapatechStatsLoop(void *arg) break; } - UpdateStreamStats(tv, hInfo, hStatStream, - stream_cnt, stream_config, streamCounters); + UpdateStreamStats(tv, hInfo, hstat_stream, + stream_cnt, stream_config, total_counters, + dispatch_host, dispatch_drop, dispatch_fwd, + is_inline, enable_stream_stats, + stream_counters); + +#ifdef NAPATECH_ENABLE_BYPASS + if (bypass_supported) { + UpdateFlowStats(tv, hInfo, hstat_stream, flow_counters, 0); + } +#endif /* NAPATECH_ENABLE_BYPASS */ StatsSyncCountersIfSignalled(tv); usleep(1000000); @@ -286,20 +703,16 @@ static void *NapatechStatsLoop(void *arg) /* CLEAN UP NT Resources and Close the info stream */ if ((status = NT_InfoClose(hInfo)) != NT_SUCCESS) { - NT_ExplainError(status, error_buffer, sizeof (error_buffer) - 1); - SCLogError(SC_ERR_RUNMODE, "NT_InfoClose() failed: %s\n", error_buffer); + NAPATECH_ERROR(SC_ERR_NAPATECH_INIT_FAILED, status); return NULL; } /* Close the statistics stream */ - if ((status = NT_StatClose(hStatStream)) != NT_SUCCESS) { - /* Get the status code as text */ - NT_ExplainError(status, error_buffer, sizeof (error_buffer)); - SCLogError(SC_ERR_RUNMODE, "NT_StatClose() failed: %s\n", error_buffer); + if ((status = NT_StatClose(hstat_stream)) != NT_SUCCESS) { + NAPATECH_ERROR(SC_ERR_NAPATECH_INIT_FAILED, status); return NULL; } - SCLogDebug("Exiting NapatechStatsLoop"); TmThreadsSetFlag(tv, THV_RUNNING_DONE); TmThreadWaitForFlag(tv, THV_DEINIT); @@ -312,8 +725,18 @@ static void *NapatechStatsLoop(void *arg) #define MAX_STREAMS 256 #define HB_HIGHWATER 2048 //1982 +/** + * \brief Tests whether a particular stream_id is actively registered + * + * \param stream_id - ID of the stream to look up + * \param num_registered - The total number of registered streams + * \param registered_streams - An array containing actively registered streams. + * + * \return Bool indicating is the specified stream is registered. + * + */ static bool RegisteredStream(uint16_t stream_id, uint16_t num_registered, - NapatechStreamConfig registered_streams[]) + NapatechStreamConfig registered_streams[]) { for (uint16_t reg_id = 0; reg_id < num_registered; ++reg_id) { if (stream_id == registered_streams[reg_id].stream_id) { @@ -323,6 +746,11 @@ static bool RegisteredStream(uint16_t stream_id, uint16_t num_registered, return false; } +/** + * \brief Count the number of worker threads defined in the conf file. 
+ * + * \return - The number of worker threads defined by the configuration + */ static uint32_t CountWorkerThreads(void) { int worker_count = 0; @@ -331,6 +759,7 @@ static uint32_t CountWorkerThreads(void) ConfNode *root = ConfGetNode("threading.cpu-affinity"); if (root != NULL) { + TAILQ_FOREACH(affinity, &root->head, next) { if (strcmp(affinity->val, "decode-cpu-set") == 0 || @@ -370,8 +799,9 @@ static uint32_t CountWorkerThreads(void) char copystr[16]; strlcpy(copystr, lnode->val, 16); - start = atoi(copystr); - end = atoi(strchr(copystr, '-') + 1); + ByteExtractStringUint8(&start, 10, 0, copystr); + ByteExtractStringUint8(&end, 10, 0, strchr(copystr, '-') + 1); + worker_count = end - start + 1; } else { @@ -392,11 +822,19 @@ static uint32_t CountWorkerThreads(void) return worker_count; } +/** + * \brief Reads and parses the stream configuration defined in the config file. + * + * \param stream_config - array to be filled in with active stream info. + * + * \return the number of streams configured or -1 if an error occurred + * + */ int NapatechGetStreamConfig(NapatechStreamConfig stream_config[]) { int status; char error_buffer[80]; // Error buffer - NtStatStream_t hStatStream; + NtStatStream_t hstat_stream; NtStatistics_t hStat; // Stat handle. NtInfoStream_t info_stream; NtInfo_t info; @@ -420,25 +858,19 @@ int NapatechGetStreamConfig(NapatechStreamConfig stream_config[]) } if ((status = NT_InfoOpen(&info_stream, "SuricataStreamInfo")) != NT_SUCCESS) { - NT_ExplainError(status, error_buffer, sizeof (error_buffer) - 1); - SCLogError(SC_ERR_NAPATECH_STREAMS_REGISTER_FAILED, - "NT_InfoOpen failed: %s", error_buffer); + NAPATECH_ERROR(SC_ERR_NAPATECH_STREAMS_REGISTER_FAILED, status); return -1; } - if ((status = NT_StatOpen(&hStatStream, "StatsStream")) != NT_SUCCESS) { - /* Get the status code as text */ - NT_ExplainError(status, error_buffer, sizeof (error_buffer)); - SCLogError(SC_ERR_RUNMODE, "NT_StatOpen() failed: %s\n", error_buffer); + if ((status = NT_StatOpen(&hstat_stream, "StatsStream")) != NT_SUCCESS) { + NAPATECH_ERROR(SC_ERR_NAPATECH_STREAMS_REGISTER_FAILED, status); return -1; } if (use_all_streams) { info.cmd = NT_INFO_CMD_READ_STREAM; if ((status = NT_InfoRead(info_stream, &info)) != NT_SUCCESS) { - NT_ExplainError(status, error_buffer, sizeof (error_buffer) - 1); - SCLogError(SC_ERR_NAPATECH_STREAMS_REGISTER_FAILED, - "NT_InfoRead failed: %s", error_buffer); + NAPATECH_ERROR(SC_ERR_NAPATECH_STREAMS_REGISTER_FAILED, status); return -1; } @@ -456,11 +888,10 @@ int NapatechGetStreamConfig(NapatechStreamConfig stream_config[]) hStat.cmd = NT_STATISTICS_READ_CMD_USAGE_DATA_V0; hStat.u.usageData_v0.streamid = (uint8_t) stream_id; - if ((status = NT_StatRead(hStatStream, &hStat)) != NT_SUCCESS) { + if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) { /* Get the status code as text */ NT_ExplainError(status, error_buffer, sizeof (error_buffer)); - SCLogError(SC_ERR_NAPATECH_INIT_FAILED, - "NT_StatRead() failed: %s\n", error_buffer); + SCLogError(SC_ERR_NAPATECH_INIT_FAILED, "NT_StatRead() failed: %s\n", error_buffer); return -1; } @@ -481,11 +912,10 @@ int NapatechGetStreamConfig(NapatechStreamConfig stream_config[]) start = 0; end = CountWorkerThreads() - 1; } else { - /* When not using the default streams we need to + /* When not using the default streams we need to * parse the array of streams from the conf */ if ((ntstreams = ConfGetNode("napatech.streams")) == NULL) { - SCLogError(SC_ERR_RUNMODE, - "Failed retrieving napatech.streams from Config"); + 
SCLogError(SC_ERR_RUNMODE, "Failed retrieving napatech.streams from Config"); if (NapatechIsAutoConfigEnabled() && (set_cpu_affinity == 0)) { SCLogError(SC_ERR_RUNMODE, "if set-cpu-affinity: no in conf then napatech.streams must be defined"); @@ -502,8 +932,7 @@ int NapatechGetStreamConfig(NapatechStreamConfig stream_config[]) { if (stream == NULL) { - SCLogError(SC_ERR_NAPATECH_STREAMS_REGISTER_FAILED, - "Couldn't Parse Stream Configuration"); + SCLogError(SC_ERR_NAPATECH_INIT_FAILED, "Couldn't Parse Stream Configuration"); return -1; } @@ -517,9 +946,8 @@ int NapatechGetStreamConfig(NapatechStreamConfig stream_config[]) char copystr[16]; strlcpy(copystr, stream->val, 16); - - start = atoi(copystr); - end = atoi(strchr(copystr, '-') + 1); + ByteExtractStringUint16(&start, 10, 0, copystr); + ByteExtractStringUint16(&end, 10, 0, strchr(copystr, '-') + 1); } else { if (stream_spec == CONFIG_SPECIFIER_RANGE) { SCLogError(SC_ERR_NAPATECH_PARSE_CONFIG, @@ -527,8 +955,7 @@ int NapatechGetStreamConfig(NapatechStreamConfig stream_config[]) exit(EXIT_FAILURE); } stream_spec = CONFIG_SPECIFIER_INDIVIDUAL; - - stream_config[instance_cnt].stream_id = atoi(stream->val); + ByteExtractStringUint16(&stream_config[instance_cnt].stream_id, 10, 0, stream->val); start = stream_config[instance_cnt].stream_id; end = stream_config[instance_cnt].stream_id; } @@ -547,11 +974,8 @@ int NapatechGetStreamConfig(NapatechStreamConfig stream_config[]) hStat.u.usageData_v0.streamid = (uint8_t) stream_config[instance_cnt].stream_id; - if ((status = NT_StatRead(hStatStream, &hStat)) != NT_SUCCESS) { - /* Get the status code as text */ - NT_ExplainError(status, error_buffer, sizeof (error_buffer)); - SCLogError(SC_ERR_NAPATECH_INIT_FAILED, - "NT_StatRead() failed: %s\n", error_buffer); + if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) { + NAPATECH_ERROR(SC_ERR_NAPATECH_INIT_FAILED, status); return -1; } @@ -563,17 +987,13 @@ int NapatechGetStreamConfig(NapatechStreamConfig stream_config[]) } /* Close the statistics stream */ - if ((status = NT_StatClose(hStatStream)) != NT_SUCCESS) { - /* Get the status code as text */ - NT_ExplainError(status, error_buffer, sizeof (error_buffer)); - SCLogError(SC_ERR_RUNMODE, "NT_StatClose() failed: %s\n", error_buffer); + if ((status = NT_StatClose(hstat_stream)) != NT_SUCCESS) { + NAPATECH_ERROR(SC_ERR_NAPATECH_INIT_FAILED, status); return -1; } if ((status = NT_InfoClose(info_stream)) != NT_SUCCESS) { - NT_ExplainError(status, error_buffer, sizeof (error_buffer) - 1); - SCLogError(SC_ERR_NAPATECH_STREAMS_REGISTER_FAILED, - "NT_InfoClose failed: %s", error_buffer); + NAPATECH_ERROR(SC_ERR_NAPATECH_INIT_FAILED, status); return -1; } @@ -587,16 +1007,16 @@ static void *NapatechBufMonitorLoop(void *arg) NtInfo_t hStreamInfo; NtStatistics_t hStat; // Stat handle. 
NtInfoStream_t hInfo; - NtStatStream_t hStatStream; - - char error_buffer[NT_ERRBUF_SIZE]; // Error buffer + NtStatStream_t hstat_stream; int status; // Status variable const uint32_t alertInterval = 25; +#ifndef NAPATECH_ENABLE_BYPASS uint32_t OB_fill_level[MAX_STREAMS] = {0}; uint32_t OB_alert_level[MAX_STREAMS] = {0}; uint32_t ave_OB_fill_level[MAX_STREAMS] = {0}; +#endif /* NAPATECH_ENABLE_BYPASS */ uint32_t HB_fill_level[MAX_STREAMS] = {0}; uint32_t HB_alert_level[MAX_STREAMS] = {0}; @@ -604,26 +1024,19 @@ static void *NapatechBufMonitorLoop(void *arg) /* Open the info and Statistics */ if ((status = NT_InfoOpen(&hInfo, "InfoStream")) != NT_SUCCESS) { - NT_ExplainError(status, error_buffer, sizeof (error_buffer) - 1); - SCLogError(SC_ERR_NAPATECH_INIT_FAILED, - "NT_InfoOpen() failed: %s\n", error_buffer); + NAPATECH_ERROR(SC_ERR_NAPATECH_INIT_FAILED, status); exit(EXIT_FAILURE); } - if ((status = NT_StatOpen(&hStatStream, "StatsStream")) != NT_SUCCESS) { - /* Get the status code as text */ - NT_ExplainError(status, error_buffer, sizeof (error_buffer)); - SCLogError(SC_ERR_NAPATECH_INIT_FAILED, - "NT_StatOpen() failed: %s\n", error_buffer); + if ((status = NT_StatOpen(&hstat_stream, "StatsStream")) != NT_SUCCESS) { + NAPATECH_ERROR(SC_ERR_NAPATECH_INIT_FAILED, status); exit(EXIT_FAILURE); } /* Read the info on all streams instantiated in the system */ hStreamInfo.cmd = NT_INFO_CMD_READ_STREAM; if ((status = NT_InfoRead(hInfo, &hStreamInfo)) != NT_SUCCESS) { - NT_ExplainError(status, error_buffer, sizeof (error_buffer) - 1); - SCLogError(SC_ERR_NAPATECH_INIT_FAILED, - "NT_InfoRead() failed: %s\n", error_buffer); + NAPATECH_ERROR(SC_ERR_NAPATECH_INIT_FAILED, status); exit(EXIT_FAILURE); } @@ -645,9 +1058,7 @@ static void *NapatechBufMonitorLoop(void *arg) /* Read the info on all streams instantiated in the system */ hStreamInfo.cmd = NT_INFO_CMD_READ_STREAM; if ((status = NT_InfoRead(hInfo, &hStreamInfo)) != NT_SUCCESS) { - NT_ExplainError(status, error_buffer, sizeof (error_buffer) - 1); - SCLogError(SC_ERR_NAPATECH_INIT_FAILED, - "NT_InfoRead() failed: %s\n", error_buffer); + NAPATECH_ERROR(SC_ERR_NAPATECH_INIT_FAILED, status); exit(EXIT_FAILURE); } @@ -666,11 +1077,8 @@ static void *NapatechBufMonitorLoop(void *arg) hStat.cmd = NT_STATISTICS_READ_CMD_USAGE_DATA_V0; hStat.u.usageData_v0.streamid = (uint8_t) stream_id; - if ((status = NT_StatRead(hStatStream, &hStat)) != NT_SUCCESS) { - /* Get the status code as text */ - NT_ExplainError(status, error_buffer, sizeof (error_buffer)); - SCLogError(SC_ERR_NAPATECH_INIT_FAILED, - "NT_StatRead() failed: %s\n", error_buffer); + if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) { + NAPATECH_ERROR(SC_ERR_NAPATECH_INIT_FAILED, status); exit(EXIT_FAILURE); } @@ -681,13 +1089,16 @@ static void *NapatechBufMonitorLoop(void *arg) } while (hStat.u.usageData_v0.data.numHostBufferUsed == 0); if (RegisteredStream(stream_id, num_registered, registered_streams)) { + +#ifndef NAPATECH_ENABLE_BYPASS ave_OB_fill_level[stream_id] = 0; +#endif /* NAPATECH_ENABLE_BYPASS */ + ave_HB_fill_level[stream_id] = 0; - for (uint32_t hb_count = 0; - hb_count < hStat.u.usageData_v0.data.numHostBufferUsed; - hb_count++) { + for (uint32_t hb_count = 0; hb_count < hStat.u.usageData_v0.data.numHostBufferUsed; hb_count++) { +#ifndef NAPATECH_ENABLE_BYPASS OB_fill_level[hb_count] = ((100 * hStat.u.usageData_v0.data.hb[hb_count].onboardBuffering.used) / hStat.u.usageData_v0.data.hb[hb_count].onboardBuffering.size); @@ -695,7 +1106,7 @@ static void 
*NapatechBufMonitorLoop(void *arg) if (OB_fill_level[hb_count] > 100) { OB_fill_level[hb_count] = 100; } - +#endif /* NAPATECH_ENABLE_BYPASS */ uint32_t bufSize = hStat.u.usageData_v0.data.hb[hb_count].enQueuedAdapter / 1024 + hStat.u.usageData_v0.data.hb[hb_count].deQueued / 1024 + hStat.u.usageData_v0.data.hb[hb_count].enQueued / 1024 @@ -705,28 +1116,32 @@ static void *NapatechBufMonitorLoop(void *arg) ((100 * hStat.u.usageData_v0.data.hb[hb_count].deQueued / 1024) / bufSize); +#ifndef NAPATECH_ENABLE_BYPASS ave_OB_fill_level[stream_id] += OB_fill_level[hb_count]; +#endif /* NAPATECH_ENABLE_BYPASS */ + ave_HB_fill_level[stream_id] += HB_fill_level[hb_count]; } +#ifndef NAPATECH_ENABLE_BYPASS ave_OB_fill_level[stream_id] /= hStat.u.usageData_v0.data.numHostBufferUsed; +#endif /* NAPATECH_ENABLE_BYPASS */ + ave_HB_fill_level[stream_id] /= hStat.u.usageData_v0.data.numHostBufferUsed; /* Host Buffer Fill Level warnings... */ if (ave_HB_fill_level[stream_id] >= (HB_alert_level[stream_id] + alertInterval)) { - while (ave_HB_fill_level[stream_id] >= HB_alert_level[stream_id] - + alertInterval) { - + while (ave_HB_fill_level[stream_id] >= HB_alert_level[stream_id] + alertInterval) { HB_alert_level[stream_id] += alertInterval; } - SCLogInfo("nt%d - Increasing Host Buffer Fill Level : %4d%%", + SCLogPerf("nt%d - Increasing Host Buffer Fill Level : %4d%%", stream_id, ave_HB_fill_level[stream_id] - 1); } if (HB_alert_level[stream_id] > 0) { if ((ave_HB_fill_level[stream_id] <= (HB_alert_level[stream_id] - alertInterval))) { - SCLogInfo("nt%d - Decreasing Host Buffer Fill Level: %4d%%", + SCLogPerf("nt%d - Decreasing Host Buffer Fill Level: %4d%%", stream_id, ave_HB_fill_level[stream_id]); while (ave_HB_fill_level[stream_id] <= (HB_alert_level[stream_id] - alertInterval)) { @@ -737,19 +1152,20 @@ static void *NapatechBufMonitorLoop(void *arg) } } +#ifndef NAPATECH_ENABLE_BYPASS /* On Board SDRAM Fill Level warnings... 
*/ if (ave_OB_fill_level[stream_id] >= (OB_alert_level[stream_id] + alertInterval)) { while (ave_OB_fill_level[stream_id] >= OB_alert_level[stream_id] + alertInterval) { OB_alert_level[stream_id] += alertInterval; } - SCLogInfo("nt%d - Increasing Adapter SDRAM Fill Level: %4d%%", + SCLogPerf("nt%d - Increasing Adapter SDRAM Fill Level: %4d%%", stream_id, ave_OB_fill_level[stream_id]); } if (OB_alert_level[stream_id] > 0) { if ((ave_OB_fill_level[stream_id] <= (OB_alert_level[stream_id] - alertInterval))) { - SCLogInfo("nt%d - Decreasing Adapter SDRAM Fill Level : %4d%%", + SCLogPerf("nt%d - Decreasing Adapter SDRAM Fill Level : %4d%%", stream_id, ave_OB_fill_level[stream_id]); while (ave_OB_fill_level[stream_id] <= (OB_alert_level[stream_id] - alertInterval)) { @@ -759,22 +1175,20 @@ static void *NapatechBufMonitorLoop(void *arg) } } } +#endif /* NAPATECH_ENABLE_BYPASS */ } ++stream_id; } } if ((status = NT_InfoClose(hInfo)) != NT_SUCCESS) { - NT_ExplainError(status, error_buffer, sizeof (error_buffer) - 1); - SCLogError(SC_ERR_NAPATECH_INIT_FAILED, "NT_InfoClose() failed: %s\n", error_buffer); + NAPATECH_ERROR(SC_ERR_NAPATECH_INIT_FAILED, status); exit(EXIT_FAILURE); } /* Close the statistics stream */ - if ((status = NT_StatClose(hStatStream)) != NT_SUCCESS) { - /* Get the status code as text */ - NT_ExplainError(status, error_buffer, sizeof (error_buffer)); - SCLogError(SC_ERR_NAPATECH_INIT_FAILED, "NT_StatClose() failed: %s\n", error_buffer); + if ((status = NT_StatClose(hstat_stream)) != NT_SUCCESS) { + NAPATECH_ERROR(SC_ERR_NAPATECH_INIT_FAILED, status); exit(EXIT_FAILURE); } @@ -786,6 +1200,7 @@ static void *NapatechBufMonitorLoop(void *arg) return NULL; } + void NapatechStartStats(void) { /* Creates the Statistic threads */ @@ -806,6 +1221,12 @@ void NapatechStartStats(void) exit(EXIT_FAILURE); } +#ifdef NAPATECH_ENABLE_BYPASS + if (bypass_supported) { + SCLogInfo("Napatech bypass functionality enabled."); + } +#endif /* NAPATECH_ENABLE_BYPASS */ + ThreadVars *buf_monitor_tv = TmThreadCreate("NapatechBufMonitor", NULL, NULL, NULL, NULL, @@ -838,7 +1259,6 @@ bool NapatechSetupNuma(uint32_t stream, uint32_t numa) NtNtplInfo_t ntpl_info; if ((status = NT_ConfigOpen(&hconfig, "ConfigStream")) != NT_SUCCESS) { - NAPATECH_ERROR(SC_ERR_NAPATECH_STREAMS_REGISTER_FAILED, status); return false; } @@ -854,7 +1274,7 @@ bool NapatechSetupNuma(uint32_t stream, uint32_t numa) return status; } -static bool NapatechSetHashmode(uint32_t *filter_id) +static uint32_t NapatechSetHashmode(void) { uint32_t status = 0; const char *hash_mode; @@ -862,7 +1282,7 @@ static bool NapatechSetHashmode(uint32_t *filter_id) char ntpl_cmd[64]; NtNtplInfo_t ntpl_info; - *filter_id = 0; + uint32_t filter_id = 0; /* Get the hashmode from the conf file. */ ConfGetValue("napatech.hashmode", &hash_mode); @@ -877,26 +1297,26 @@ static bool NapatechSetHashmode(uint32_t *filter_id) if ((status = NT_NTPL(hconfig, ntpl_cmd, &ntpl_info, NT_NTPL_PARSER_VALIDATE_NORMAL)) == NT_SUCCESS) { - *filter_id = ntpl_info.ntplId; - SCLogInfo("Napatech hashmode: %s ID: %d", hash_mode, status); + filter_id = ntpl_info.ntplId; + SCLogConfig("Napatech hashmode: %s ID: %d", hash_mode, status); } else { NAPATECH_NTPL_ERROR(ntpl_cmd, ntpl_info, status); status = 0; } - return status; + return filter_id; } static uint32_t GetStreamNUMAs(uint32_t stream_id, int stream_numas[]) { NtStatistics_t hStat; // Stat handle. 
- NtStatStream_t hStatStream; + NtStatStream_t hstat_stream; int status; // Status variable for (int i = 0; i < MAX_HOSTBUFFERS; ++i) stream_numas[i] = -1; - if ((status = NT_StatOpen(&hStatStream, "StatsStream")) != NT_SUCCESS) { + if ((status = NT_StatOpen(&hstat_stream, "StatsStream")) != NT_SUCCESS) { NAPATECH_ERROR(SC_ERR_NAPATECH_INIT_FAILED, status); exit(EXIT_FAILURE); } @@ -909,7 +1329,7 @@ static uint32_t GetStreamNUMAs(uint32_t stream_id, int stream_numas[]) hStat.cmd = NT_STATISTICS_READ_CMD_USAGE_DATA_V0; hStat.u.usageData_v0.streamid = (uint8_t) stream_id; - if ((status = NT_StatRead(hStatStream, &hStat)) != NT_SUCCESS) { + if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) { NAPATECH_ERROR(SC_ERR_NAPATECH_INIT_FAILED, status); exit(EXIT_FAILURE); } @@ -921,19 +1341,123 @@ static uint32_t GetStreamNUMAs(uint32_t stream_id, int stream_numas[]) return hStat.u.usageData_v0.data.numHostBufferUsed; } -uint32_t NapatechSetupTraffic(uint32_t first_stream, uint32_t last_stream, - uint32_t *filter_id, uint32_t *hash_id) +static int NapatechSetFilter(NtConfigStream_t hconfig, char *ntpl_cmd) +{ + int status = 0; + int local_filter_id = 0; + + NtNtplInfo_t ntpl_info; + if ((status = NT_NTPL(hconfig, ntpl_cmd, &ntpl_info, + NT_NTPL_PARSER_VALIDATE_NORMAL)) == NT_SUCCESS) { + SCLogConfig("NTPL filter assignment \"%s\" returned filter id %4d", + ntpl_cmd, local_filter_id); + } else { + NAPATECH_NTPL_ERROR(ntpl_cmd, ntpl_info, status); + exit(EXIT_FAILURE); + } + + return local_filter_id; +} + +uint32_t NapatechDeleteFilters(void) +{ + uint32_t status = 0; + static NtConfigStream_t hconfig; + char ntpl_cmd[64]; + NtNtplInfo_t ntpl_info; + + if ((status = NT_ConfigOpen(&hconfig, "ConfigStream")) != NT_SUCCESS) { + NAPATECH_ERROR(SC_ERR_NAPATECH_STREAMS_REGISTER_FAILED, status); + exit(EXIT_FAILURE); + } + + snprintf(ntpl_cmd, 64, "delete = all"); + if ((status = NT_NTPL(hconfig, ntpl_cmd, &ntpl_info, + NT_NTPL_PARSER_VALIDATE_NORMAL)) == NT_SUCCESS) { + status = ntpl_info.ntplId; + } else { + NAPATECH_NTPL_ERROR(ntpl_cmd, ntpl_info, status); + status = 0; + } + + NT_ConfigClose(hconfig); + + return status; +} + + +uint32_t NapatechSetupTraffic(uint32_t first_stream, uint32_t last_stream) { #define PORTS_SPEC_SIZE 64 - char ports_spec[PORTS_SPEC_SIZE]; + struct ports_spec_s { + uint8_t first[MAX_PORTS]; + uint8_t second[MAX_PORTS]; + bool all; + char str[PORTS_SPEC_SIZE]; + } ports_spec; + + ports_spec.all = false; + ConfNode *ntports; - bool first_iteration = true; + int iteration = 0; int status = 0; - static NtConfigStream_t hconfig; - char ntpl_cmd[128]; + NtConfigStream_t hconfig; + char ntpl_cmd[512]; + int is_inline = 0; + int is_span_port[MAX_PORTS] = { 0 }; - NapatechSetHashmode(hash_id); + char span_ports[128]; + memset(span_ports, 0, sizeof(span_ports)); + + if (ConfGetBool("napatech.inline", &is_inline) == 0) { + is_inline = 0; + } + + NapatechSetHashmode(); + + if ((status = NT_ConfigOpen(&hconfig, "ConfigStream")) != NT_SUCCESS) { + NAPATECH_ERROR(SC_ERR_NAPATECH_INIT_FAILED, status); + exit(EXIT_FAILURE); + } + + if (first_stream == last_stream) { + snprintf(ntpl_cmd, sizeof (ntpl_cmd), + "Setup[state=inactive] = StreamId == %d", + first_stream); + } else { + snprintf(ntpl_cmd, sizeof (ntpl_cmd), + "Setup[state=inactive] = StreamId == (%d..%d)", + first_stream, last_stream); + } + NapatechSetFilter(hconfig, ntpl_cmd); + +#ifdef NAPATECH_ENABLE_BYPASS + if (NapatechUseHWBypass()) { + SCLogInfo("Napatech Hardware Bypass enabled."); + } else { + SCLogInfo("Napatech 
Hardware Bypass available but disabled.");
+    }
+#else
+    if (NapatechUseHWBypass()) {
+        SCLogInfo("Napatech Hardware Bypass requested in conf but is not available.");
+        exit(EXIT_FAILURE);
+    } else {
+        SCLogInfo("Napatech Hardware Bypass disabled.");
+    }
+
+    if (is_inline) {
+        SCLogError(SC_ERR_RUNMODE, "Napatech inline mode not supported. (Only available when Hardware Bypass support is enabled.)");
+        exit(EXIT_FAILURE);
+    }
+#endif
+
+    if (is_inline) {
+        SCLogInfo("Napatech configured for inline mode.");
+    } else {
+
+        SCLogInfo("Napatech configured for passive (non-inline) mode.");
+    }
 
     /* When not using the default streams we need to parse
      * the array of streams from the conf
@@ -947,6 +1471,9 @@ uint32_t NapatechSetupTraffic(uint32_t first_stream, uint32_t last_stream,
         ConfNode *port;
         enum CONFIG_SPECIFIER stream_spec = CONFIG_SPECIFIER_UNDEFINED;
 
+        if (NapatechUseHWBypass()) {
+            SCLogInfo("Listening on the following Napatech ports:");
+        }
         /* Build the NTPL command using values in the config file. */
         TAILQ_FOREACH(port, &ntports->head, next) {
@@ -956,78 +1483,378 @@ uint32_t NapatechSetupTraffic(uint32_t first_stream, uint32_t last_stream,
                 exit(EXIT_FAILURE);
             }
 
-            uint8_t start, end;
-            if (strncmp(port->val, "all", 3) == 0) {
-                /* check that the string in the config file is correctly specified */
-                if (stream_spec != CONFIG_SPECIFIER_UNDEFINED) {
-                    SCLogError(SC_ERR_NAPATECH_PARSE_CONFIG,
-                            "Only one Napatech port specifier type allowed.");
-                    exit(EXIT_FAILURE);
-                }
-                stream_spec = CONFIG_SPECIFIER_RANGE;
+        if (NapatechUseHWBypass()) {
+#ifdef NAPATECH_ENABLE_BYPASS
+            if (strchr(port->val, '-')) {
+                stream_spec = CONFIG_SPECIFIER_RANGE;
 
-                snprintf(ports_spec, sizeof(ports_spec), "all");
-            } else if (strchr(port->val, '-')) {
-                /* check that the string in the config file is correctly specified */
-                if (stream_spec != CONFIG_SPECIFIER_UNDEFINED) {
-                    SCLogError(SC_ERR_NAPATECH_PARSE_CONFIG,
-                            "Only one Napatech port specifier type allowed.");
-                    exit(EXIT_FAILURE);
-                }
-                stream_spec = CONFIG_SPECIFIER_RANGE;
+                char copystr[16];
+                strlcpy(copystr, port->val, sizeof(copystr));
+                ByteExtractStringUint8(&ports_spec.first[iteration], 10, 0, copystr);
+                ByteExtractStringUint8(&ports_spec.second[iteration], 10, 0, strchr(copystr, '-')+1);
+
+                if (ports_spec.first[iteration] == ports_spec.second[iteration]) {
+                    if (is_inline) {
+                        SCLogError(SC_ERR_NAPATECH_PARSE_CONFIG,
+                                "Error with napatech.ports in conf file. When running in inline mode the two ports specifying a segment must be different.");
+                        exit(EXIT_FAILURE);
+                    } else {
+                        /* SPAN port configuration */
+                        is_span_port[ports_spec.first[iteration]] = 1;
+
+                        if (strlen(span_ports) == 0) {
+                            snprintf(span_ports, sizeof (span_ports), "%d", ports_spec.first[iteration]);
+                        } else {
+                            char temp[16];
+                            snprintf(temp, sizeof(temp), ",%d", ports_spec.first[iteration]);
+                            strlcat(span_ports, temp, sizeof(span_ports));
+                        }
+
+                    }
+                }
 
-                char copystr[16];
-                strlcpy(copystr, port->val, sizeof(copystr));
+                if (NapatechGetAdapter(ports_spec.first[iteration]) != NapatechGetAdapter(ports_spec.second[iteration])) {
+                    SCLogError(SC_ERR_NAPATECH_PARSE_CONFIG,
+                            "Invalid napatech.ports specification in conf file.");
+                    SCLogError(SC_ERR_NAPATECH_PARSE_CONFIG,
+                            "Two ports on a segment must reside on the same adapter. 
port %d is on adapter %d, port %d is on adapter %d.", + ports_spec.first[iteration], + NapatechGetAdapter(ports_spec.first[iteration]), + ports_spec.second[iteration], + NapatechGetAdapter(ports_spec.second[iteration]) + ); + exit(EXIT_FAILURE); + } - start = atoi(copystr); - end = atoi(strchr(copystr, '-') + 1); - snprintf(ports_spec, sizeof(ports_spec), "port == (%d..%d)", start, end); + NapatechSetPortmap(ports_spec.first[iteration], ports_spec.second[iteration]); + if (ports_spec.first[iteration] == ports_spec.second[iteration]) { + SCLogInfo(" span_port: %d", ports_spec.first[iteration]); + } else { + SCLogInfo(" %s: %d - %d", is_inline ? "inline_ports" : "tap_ports", ports_spec.first[iteration], ports_spec.second[iteration]); + } - } else { - /* check that the sting in the config file is correctly specified */ - if (stream_spec == CONFIG_SPECIFIER_RANGE) { + if (iteration == 0) { + if (ports_spec.first[iteration] == ports_spec.second[iteration]) { + snprintf(ports_spec.str, sizeof (ports_spec.str), "%d", ports_spec.first[iteration]); + } else { + snprintf(ports_spec.str, sizeof (ports_spec.str), "%d,%d", ports_spec.first[iteration], ports_spec.second[iteration]); + } + } else { + char temp[16]; + if (ports_spec.first[iteration] == ports_spec.second[iteration]) { + snprintf(temp, sizeof(temp), ",%d", ports_spec.first[iteration]); + } else { + snprintf(temp, sizeof(temp), ",%d,%d", ports_spec.first[iteration], ports_spec.second[iteration]); + } + strlcat(ports_spec.str, temp, sizeof(ports_spec.str)); + } + } else { SCLogError(SC_ERR_NAPATECH_PARSE_CONFIG, - "Napatech port range specifiers cannot be combined with individual stream specifiers."); + "When using hardware flow bypass ports must be specified as segments. E.g. ports: [0-1, 0-2]"); exit(EXIT_FAILURE); } - stream_spec = CONFIG_SPECIFIER_INDIVIDUAL; +#endif + } else { // !NapatechUseHWBypass() + if (strncmp(port->val, "all", 3) == 0) { + /* check that the sting in the config file is correctly specified */ + if (stream_spec != CONFIG_SPECIFIER_UNDEFINED) { + SCLogError(SC_ERR_NAPATECH_PARSE_CONFIG, + "Only one Napatech port specifier type is allowed."); + exit(EXIT_FAILURE); + } + stream_spec = CONFIG_SPECIFIER_RANGE; + + ports_spec.all = true; + snprintf(ports_spec.str, sizeof (ports_spec.str), "all"); + } else if (strchr(port->val, '-')) { + /* check that the sting in the config file is correctly specified */ + if (stream_spec != CONFIG_SPECIFIER_UNDEFINED) { + SCLogError(SC_ERR_NAPATECH_PARSE_CONFIG, + "Only one Napatech port specifier is allowed when hardware bypass is disabled. (E.g. 
ports: [0-4], NOT ports: [0-1,2-3])"); + exit(EXIT_FAILURE); + } + stream_spec = CONFIG_SPECIFIER_RANGE; - /* Determine the ports to use on the NTPL assign statement*/ - if (first_iteration) { - snprintf(ports_spec, sizeof(ports_spec), "port==%s", port->val); - first_iteration = false; + char copystr[16]; + strlcpy(copystr, port->val, sizeof (copystr)); + ByteExtractStringUint8(&ports_spec.first[iteration], 10, 0, copystr); + ByteExtractStringUint8(&ports_spec.second[iteration], 10, 0, strchr(copystr, '-') + 1); + snprintf(ports_spec.str, sizeof (ports_spec.str), "(%d..%d)", ports_spec.first[iteration], ports_spec.second[iteration]); } else { - char temp[PORTS_SPEC_SIZE]; - snprintf(temp, sizeof(temp), "%s,%s",ports_spec,port->val); - snprintf(ports_spec, sizeof(ports_spec), "%s", temp); + /* check that the sting in the config file is correctly specified */ + if (stream_spec == CONFIG_SPECIFIER_RANGE) { + SCLogError(SC_ERR_NAPATECH_PARSE_CONFIG, + "Napatech port range specifiers cannot be combined with individual stream specifiers."); + exit(EXIT_FAILURE); + } + stream_spec = CONFIG_SPECIFIER_INDIVIDUAL; + + ByteExtractStringUint8(&ports_spec.first[iteration], 10, 0, port->val); + + /* Determine the ports to use on the NTPL assign statement*/ + if (iteration == 0) { + snprintf(ports_spec.str, sizeof (ports_spec.str), "%s", port->val); + } else { + strlcat(ports_spec.str, ",", sizeof(ports_spec.str)); + strlcat(ports_spec.str, port->val, sizeof(ports_spec.str)); + } } + } // if !NapatechUseHWBypass() + ++iteration; + } /* TAILQ_FOREACH */ + +#ifdef NAPATECH_ENABLE_BYPASS + if (bypass_supported) { + if (is_inline) { + char inline_setup_cmd[512]; + if (first_stream == last_stream) { + snprintf(inline_setup_cmd, sizeof (ntpl_cmd), + "Setup[TxDescriptor=Dyn;TxPorts=%s;RxCRC=False;TxPortPos=112;UseWL=True] = StreamId == %d", + ports_spec.str, first_stream); + } else { + snprintf(inline_setup_cmd, sizeof (ntpl_cmd), + "Setup[TxDescriptor=Dyn;TxPorts=%s;RxCRC=False;TxPortPos=112;UseWL=True] = StreamId == (%d..%d)", + ports_spec.str, first_stream, last_stream); + } + NapatechSetFilter(hconfig, inline_setup_cmd); + } + /* Build the NTPL command */ + snprintf(ntpl_cmd, sizeof (ntpl_cmd), + "assign[priority=3;streamid=(%d..%d);colormask=0x10000000;" + "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=Layer4Header[0]]= %s%s", + first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str); + NapatechSetFilter(hconfig, ntpl_cmd); + + + snprintf(ntpl_cmd, sizeof (ntpl_cmd), + "assign[priority=2;streamid=(%d..%d);colormask=0x11000000;" + "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=Layer4Header[0]" + "]= %s%s and (Layer3Protocol==IPV4)", + first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str); + NapatechSetFilter(hconfig, ntpl_cmd); + + + snprintf(ntpl_cmd, sizeof (ntpl_cmd), + "assign[priority=2;streamid=(%d..%d);colormask=0x14000000;" + "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=Layer4Header[0]]= %s%s and (Layer3Protocol==IPV6)", + first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str); + NapatechSetFilter(hconfig, ntpl_cmd); + + snprintf(ntpl_cmd, sizeof (ntpl_cmd), + "assign[priority=2;streamid=(%d..%d);colormask=0x10100000;" + "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=Layer4Header[0]]= %s%s and (Layer4Protocol==TCP)", + first_stream, last_stream, ports_spec.all ? 
"" : "port==", ports_spec.str); + NapatechSetFilter(hconfig, ntpl_cmd); + + snprintf(ntpl_cmd, sizeof (ntpl_cmd), + "assign[priority=2;streamid=(%d..%d);colormask=0x10200000;" + "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=Layer4Header[0]" + "]= %s%s and (Layer4Protocol==UDP)", + first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str); + NapatechSetFilter(hconfig, ntpl_cmd); + + if (strlen(span_ports) > 0) { + snprintf(ntpl_cmd, sizeof (ntpl_cmd), + "assign[priority=2;streamid=(%d..%d);colormask=0x00001000;" + "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=Layer4Header[0]" + "]= port==%s", + first_stream, last_stream, span_ports); + NapatechSetFilter(hconfig, ntpl_cmd); } - } - /* Build the NTPL command */ - snprintf(ntpl_cmd, sizeof(ntpl_cmd), "assign[streamid=(%d..%d)] = %s", - first_stream, last_stream, ports_spec); + snprintf(ntpl_cmd, sizeof (ntpl_cmd), + "KeyType[name=KT%u]={sw_32_32,sw_16_16}", + NAPATECH_KEYTYPE_IPV4); + NapatechSetFilter(hconfig, ntpl_cmd); + + snprintf(ntpl_cmd, sizeof (ntpl_cmd), + "KeyDef[name=KDEF%u;KeyType=KT%u;ipprotocolfield=OUTER]=(Layer3Header[12]/32/32,Layer4Header[0]/16/16)", + NAPATECH_KEYTYPE_IPV4, NAPATECH_KEYTYPE_IPV4); + NapatechSetFilter(hconfig, ntpl_cmd); + + snprintf(ntpl_cmd, sizeof (ntpl_cmd), + "KeyType[name=KT%u]={32,32,16,16}", + NAPATECH_KEYTYPE_IPV4_SPAN); + NapatechSetFilter(hconfig, ntpl_cmd); + + snprintf(ntpl_cmd, sizeof (ntpl_cmd), + "KeyDef[name=KDEF%u;KeyType=KT%u;ipprotocolfield=OUTER;keysort=sorted]=(Layer3Header[12]/32,Layer3Header[16]/32,Layer4Header[0]/16,Layer4Header[2]/16)", + NAPATECH_KEYTYPE_IPV4_SPAN, NAPATECH_KEYTYPE_IPV4_SPAN); + NapatechSetFilter(hconfig, ntpl_cmd); + + /* IPv6 5tuple for inline and tap ports */ + snprintf(ntpl_cmd, sizeof (ntpl_cmd), + "KeyType[name=KT%u]={sw_128_128,sw_16_16}", + NAPATECH_KEYTYPE_IPV6); + NapatechSetFilter(hconfig, ntpl_cmd); + + snprintf(ntpl_cmd, sizeof (ntpl_cmd), + "KeyDef[name=KDEF%u;KeyType=KT%u;ipprotocolfield=OUTER]=(Layer3Header[8]/128/128,Layer4Header[0]/16/16)", + NAPATECH_KEYTYPE_IPV6, NAPATECH_KEYTYPE_IPV6); + NapatechSetFilter(hconfig, ntpl_cmd); + + /* IPv6 5tuple for SPAN Ports */ + snprintf(ntpl_cmd, sizeof (ntpl_cmd), + "KeyType[name=KT%u]={128,128,16,16}", + NAPATECH_KEYTYPE_IPV6_SPAN); + NapatechSetFilter(hconfig, ntpl_cmd); + + snprintf(ntpl_cmd, sizeof (ntpl_cmd), + "KeyDef[name=KDEF%u;KeyType=KT%u;ipprotocolfield=OUTER;keysort=sorted]=(Layer3Header[8]/128,Layer3Header[24]/128,Layer4Header[0]/16,Layer4Header[2]/16)", + NAPATECH_KEYTYPE_IPV6_SPAN, NAPATECH_KEYTYPE_IPV6_SPAN); + NapatechSetFilter(hconfig, ntpl_cmd); + + + int pair; + char ports_ntpl_a[64]; + char ports_ntpl_b[64]; + memset(ports_ntpl_a, 0, sizeof(ports_ntpl_a)); + memset(ports_ntpl_b, 0, sizeof(ports_ntpl_b)); + + for (pair = 0; pair < iteration; ++pair) { + char port_str[8]; + + if (!is_span_port[ports_spec.first[pair]]) { + snprintf(port_str, sizeof(port_str), "%s%u ", strlen(ports_ntpl_a) == 0 ? "" : ",", ports_spec.first[pair]); + strlcat(ports_ntpl_a, port_str, sizeof(ports_ntpl_a)); + + snprintf(port_str, sizeof(port_str), "%s%u ", strlen(ports_ntpl_b) == 0 ? 
"" : ",", ports_spec.second[pair]); + strlcat(ports_ntpl_b, port_str, sizeof(ports_ntpl_b)); + } + } - NtNtplInfo_t ntpl_info; + if (strlen(ports_ntpl_a) > 0) { + /* This is the assign for dropping upstream traffic */ + snprintf(ntpl_cmd, sizeof (ntpl_cmd), + "assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV4)and(port == %s)and(Key(KDEF%u,KeyID=%u)==%u)", + ports_ntpl_a, + NAPATECH_KEYTYPE_IPV4, + NAPATECH_KEYTYPE_IPV4, + NAPATECH_FLOWTYPE_DROP); + NapatechSetFilter(hconfig, ntpl_cmd); + } - if ((status = NT_ConfigOpen(&hconfig, "ConfigStream")) != NT_SUCCESS) { - NAPATECH_ERROR(SC_ERR_NAPATECH_INIT_FAILED, status); - exit(EXIT_FAILURE); - } + if (strlen(ports_ntpl_b) > 0) { + /* This is the assign for dropping downstream traffic */ + snprintf(ntpl_cmd, sizeof (ntpl_cmd), + "assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV4)and(port == %s)and(Key(KDEF%u,KeyID=%u,fieldaction=swap)==%u)", + ports_ntpl_b, //ports_spec.str, + NAPATECH_KEYTYPE_IPV4, + NAPATECH_KEYTYPE_IPV4, + NAPATECH_FLOWTYPE_DROP); + NapatechSetFilter(hconfig, ntpl_cmd); + } - if ((status = NT_NTPL(hconfig, ntpl_cmd, &ntpl_info, - NT_NTPL_PARSER_VALIDATE_NORMAL)) == NT_SUCCESS) { - *filter_id = ntpl_info.ntplId; - status = ntpl_info.u.errorData.errCode; - SCLogInfo("NTPL filter assignment \"%s\" returned filter id %4d", - ntpl_cmd, *filter_id); + if (strlen(span_ports) > 0) { + /* This is the assign for dropping SPAN Port traffic */ + snprintf(ntpl_cmd, sizeof (ntpl_cmd), + "assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV4)and(port == %s)and(Key(KDEF%u,KeyID=%u)==%u)", + span_ports, + NAPATECH_KEYTYPE_IPV4_SPAN, + NAPATECH_KEYTYPE_IPV4_SPAN, + NAPATECH_FLOWTYPE_DROP); + NapatechSetFilter(hconfig, ntpl_cmd); + } + + if (is_inline) { + for (pair = 0; pair < iteration; ++pair) { + /* This is the assignment for forwarding traffic */ + snprintf(ntpl_cmd, sizeof (ntpl_cmd), + "assign[priority=1;streamid=drop;DestinationPort=%d;colormask=0x2]=(Layer3Protocol==IPV4)and(port == %d)and(Key(KDEF%u,KeyID=%u)==%u)", + ports_spec.second[pair], + ports_spec.first[pair], + NAPATECH_KEYTYPE_IPV4, + NAPATECH_KEYTYPE_IPV4, + NAPATECH_FLOWTYPE_PASS); + NapatechSetFilter(hconfig, ntpl_cmd); + + snprintf(ntpl_cmd, sizeof (ntpl_cmd), + "assign[priority=1;streamid=drop;DestinationPort=%d;colormask=0x2]=(Layer3Protocol==IPV4)and(port == %d)and(Key(KDEF%u,KeyID=%u,fieldaction=swap)==%u)", + ports_spec.first[pair], + ports_spec.second[pair], + NAPATECH_KEYTYPE_IPV4, + NAPATECH_KEYTYPE_IPV4, + NAPATECH_FLOWTYPE_PASS); + NapatechSetFilter(hconfig, ntpl_cmd); + } + } + + if (strlen(ports_ntpl_a) > 0) { + /* This is the assign for dropping upstream traffic */ + snprintf(ntpl_cmd, sizeof (ntpl_cmd), + "assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV6)and(port == %s)and(Key(KDEF%u,KeyID=%u)==%u)", + ports_ntpl_a, + NAPATECH_KEYTYPE_IPV6, + NAPATECH_KEYTYPE_IPV6, + NAPATECH_FLOWTYPE_DROP); + NapatechSetFilter(hconfig, ntpl_cmd); + } + + if (strlen(ports_ntpl_b) > 0) { + /* This is the assign for dropping downstream traffic */ + snprintf(ntpl_cmd, sizeof (ntpl_cmd), + "assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV6)and(port == %s)and(Key(KDEF%u,KeyID=%u,fieldaction=swap)==%u)", + ports_ntpl_b, //ports_spec.str, + NAPATECH_KEYTYPE_IPV6, + NAPATECH_KEYTYPE_IPV6, + NAPATECH_FLOWTYPE_DROP); + NapatechSetFilter(hconfig, ntpl_cmd); + } + + if (strlen(span_ports) > 0) { + /* This is the assign for dropping SPAN Port traffic */ + snprintf(ntpl_cmd, sizeof (ntpl_cmd), + 
"assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV6)and(port == %s)and(Key(KDEF%u,KeyID=%u)==%u)", + span_ports, + NAPATECH_KEYTYPE_IPV6_SPAN, + NAPATECH_KEYTYPE_IPV6_SPAN, + NAPATECH_FLOWTYPE_DROP); + NapatechSetFilter(hconfig, ntpl_cmd); + } + + if (is_inline) { + for (pair = 0; pair < iteration; ++pair) { + snprintf(ntpl_cmd, sizeof (ntpl_cmd), + "assign[priority=1;streamid=drop;DestinationPort=%d;colormask=0x4]=(Layer3Protocol==IPV6)and(port==%d)and(Key(KDEF%u,KeyID=%u)==%u)", + ports_spec.second[pair], + ports_spec.first[pair], + NAPATECH_KEYTYPE_IPV6, + NAPATECH_KEYTYPE_IPV6, + NAPATECH_FLOWTYPE_PASS); + NapatechSetFilter(hconfig, ntpl_cmd); + + snprintf(ntpl_cmd, sizeof (ntpl_cmd), + "assign[priority=1;streamid=drop;DestinationPort=%d;colormask=0x4]=(Layer3Protocol==IPV6)and(port==%d)and(Key(KDEF%u,KeyID=%u,fieldaction=swap)==%u)", + ports_spec.first[pair], + ports_spec.second[pair], + NAPATECH_KEYTYPE_IPV6, + NAPATECH_KEYTYPE_IPV6, + NAPATECH_FLOWTYPE_PASS); + NapatechSetFilter(hconfig, ntpl_cmd); + } + } } else { - NAPATECH_NTPL_ERROR(ntpl_cmd, ntpl_info, status); - status = ntpl_info.u.errorData.errCode; - return false; + if (is_inline) { + SCLogError(SC_WARN_COMPATIBILITY, + "Napatech Inline operation not supported by this FPGA version."); + exit(EXIT_FAILURE); + } + + if (NapatechIsAutoConfigEnabled()){ + snprintf(ntpl_cmd, sizeof (ntpl_cmd), "assign[streamid=(%d..%d);colormask=0x0] = %s%s", + first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str); + NapatechSetFilter(hconfig, ntpl_cmd); + } } - SCLogInfo("Host-buffer NUMA assignments: "); +#else /* NAPATECH_ENABLE_BYPASS */ + snprintf(ntpl_cmd, sizeof (ntpl_cmd), "assign[streamid=(%d..%d)] = %s%s", + first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str); + NapatechSetFilter(hconfig, ntpl_cmd); + +#endif /* !NAPATECH_ENABLE_BYPASS */ + + SCLogConfig("Host-buffer NUMA assignments: "); int numa_nodes[MAX_HOSTBUFFERS]; uint32_t stream_id; for (stream_id = first_stream; stream_id < last_stream; ++stream_id) { @@ -1036,42 +1863,28 @@ uint32_t NapatechSetupTraffic(uint32_t first_stream, uint32_t last_stream, uint32_t num_host_buffers = GetStreamNUMAs(stream_id, numa_nodes); - snprintf(temp1, 256, " stream %d:", stream_id); + snprintf(temp1, 256, " stream %d: ", stream_id); for (uint32_t hb_id = 0; hb_id < num_host_buffers; ++hb_id) { - snprintf(temp2, 256, "%s %d ", temp1, numa_nodes[hb_id]); - snprintf(temp1, 256, "%s", temp2); + snprintf(temp2, 256, "%d ", numa_nodes[hb_id]); + strlcat(temp1, temp2, sizeof(temp1)); } - SCLogInfo("%s", temp1); + SCLogConfig("%s", temp1); } - return status; -} - -bool NapatechDeleteFilter(uint32_t filter_id) -{ - uint32_t status = 0; - static NtConfigStream_t hconfig; - char ntpl_cmd[64]; - NtNtplInfo_t ntpl_info; - - /* issue an NTPL command to delete the filter */ - snprintf(ntpl_cmd, 64, "delete = %d", filter_id); - - if ((status = NT_ConfigOpen(&hconfig, "ConfigStream")) != NT_SUCCESS) { - NAPATECH_ERROR(SC_ERR_NAPATECH_STREAMS_REGISTER_FAILED, status); - exit(EXIT_FAILURE); - } - - if ((status = NT_NTPL(hconfig, ntpl_cmd, &ntpl_info, - NT_NTPL_PARSER_VALIDATE_NORMAL)) == NT_SUCCESS) { - status = ntpl_info.ntplId; - SCLogInfo("Removed Napatech filter %d. 
", filter_id); + if (first_stream == last_stream) { + snprintf(ntpl_cmd, sizeof (ntpl_cmd), + "Setup[state=active] = StreamId == %d", + first_stream); } else { - NAPATECH_NTPL_ERROR(ntpl_cmd, ntpl_info, status); - status = 0; + snprintf(ntpl_cmd, sizeof (ntpl_cmd), + "Setup[state=active] = StreamId == (%d..%d)", + first_stream, last_stream); } + NapatechSetFilter(hconfig, ntpl_cmd); + + NT_ConfigClose(hconfig); return status; } diff --git a/src/util-napatech.h b/src/util-napatech.h index d62357aa3dcd..93dca761c3a5 100644 --- a/src/util-napatech.h +++ b/src/util-napatech.h @@ -14,43 +14,52 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. */ - /** * \file * * \author Phil Young * */ - #ifndef __UTIL_NAPATECH_H__ #define __UTIL_NAPATECH_H__ #ifdef HAVE_NAPATECH #include -typedef struct NapatechPacketVars_ { +typedef struct NapatechPacketVars_ +{ uint64_t stream_id; NtNetBuf_t nt_packet_buf; ThreadVars *tv; +#ifdef NAPATECH_ENABLE_BYPASS + NtDyn3Descr_t *dyn3; + int bypass; +#endif } NapatechPacketVars; -typedef struct NapatechStreamConfig_ { +typedef struct NapatechStreamConfig_ +{ uint16_t stream_id; bool is_active; bool initialized; } NapatechStreamConfig; -typedef struct NapatechCurrentStats_ { +typedef struct NapatechCurrentStats_ +{ uint64_t current_packets; - uint64_t current_drops; uint64_t current_bytes; + uint64_t current_drop_packets; + uint64_t current_drop_bytes; } NapatechCurrentStats; +#define MAX_HOSTBUFFER 4 #define MAX_STREAMS 256 +#define MAX_PORTS 80 +#define MAX_ADAPTERS 8 +#define HB_HIGHWATER 2048 //1982 extern void NapatechStartStats(void); - #define NAPATECH_ERROR(err_type, status) { \ char errorBuffer[1024]; \ NT_ExplainError((status), errorBuffer, sizeof (errorBuffer) - 1); \ @@ -75,11 +84,39 @@ extern void NapatechStartStats(void); " %s", ntpl_info.u.errorData.errBuffer[2]); \ } +// #define ENABLE_NT_DEBUG +#ifdef ENABLE_NT_DEBUG + void NapatechPrintIP(uint32_t address); + + #define NAPATECH_DEBUG(...) printf(__VA_ARGS__) + #define NAPATECH_PRINTIP(a) NapatechPrintIP(uint32_t address) +#else + #define NAPATECH_DEBUG(...) + #define NAPATECH_PRINTIP(a) +#endif NapatechCurrentStats NapatechGetCurrentStats(uint16_t id); int NapatechGetStreamConfig(NapatechStreamConfig stream_config[]); bool NapatechSetupNuma(uint32_t stream, uint32_t numa); -uint32_t NapatechSetupTraffic(uint32_t first_stream, uint32_t last_stream, uint32_t *filter_id, uint32_t *hash_id); -bool NapatechDeleteFilter(uint32_t filter_id); -#endif //HAVE_NAPATECH +uint32_t NapatechSetupTraffic(uint32_t first_stream, uint32_t last_stream); +uint32_t NapatechDeleteFilters(void); + +#ifdef NAPATECH_ENABLE_BYPASS + +/* */ +#define NAPATECH_KEYTYPE_IPV4 3 +#define NAPATECH_KEYTYPE_IPV4_SPAN 4 +#define NAPATECH_KEYTYPE_IPV6 5 +#define NAPATECH_KEYTYPE_IPV6_SPAN 6 +#define NAPATECH_FLOWTYPE_DROP 7 +#define NAPATECH_FLOWTYPE_PASS 8 + +int NapatechInitFlowStreams(void); +NtFlowStream_t *NapatechGetFlowStreamPtr(int device); +int NapatechCloseFlowStreams(void); + +int NapatechIsBypassSupported(void); + +#endif /* NAPATECH_ENABLE_BYPASS */ +#endif /* HAVE_NAPATECH */ #endif /* __UTIL_NAPATECH_H__ */ diff --git a/suricata.yaml.in b/suricata.yaml.in index aacad7411d79..544a3cea566a 100644 --- a/suricata.yaml.in +++ b/suricata.yaml.in @@ -1765,21 +1765,55 @@ napatech: # streams: ["0-3"] + # Stream stats can be enabled to provide fine grain packet and byte counters + # for each thread/stream that is configured. 
+  #
+  enable-stream-stats: no
+
   # When auto-config is enabled the streams will be created and assigned
   # automatically to the NUMA node where the thread resides. If cpu-affinity
   # is enabled in the threading section. Then the streams will be created
   # according to the number of worker threads specified in the worker cpu set.
   # Otherwise, the streams array is used to define the streams.
   #
-  # This option cannot be used simultaneous with "use-all-streams".
+  # This option is intended primarily to support legacy configurations.
+  #
+  # This option cannot be used simultaneously with either "use-all-streams"
+  # or hardware-bypass.
   #
   auto-config: yes

+  # Enable hardware-level flow bypass.
+  #
+  hardware-bypass: yes
+
+  # Enable inline operation. When enabled, traffic arriving on a given port is
+  # automatically forwarded out its peer port after analysis by Suricata.
+  #
+  inline: no
+
   # Ports indicates which napatech ports are to be used in auto-config mode.
   # these are the port ID's of the ports that will be merged prior to the
   # traffic being distributed to the streams.
   #
-  # This can be specified in any of the following ways:
+  # When hardware-bypass is enabled the ports must be configured as segments that
+  # specify the port(s) on which upstream and downstream traffic will arrive.
+  # This information is necessary for the hardware to properly process flows.
+  #
+  # When using a tap configuration one of the ports will receive inbound traffic
+  # for the network and the other will receive outbound traffic. The two ports on a
+  # given segment must reside on the same network adapter.
+  #
+  # When using a SPAN-port configuration the upstream and downstream traffic
+  # arrives on a single port. This is configured by setting the two sides of the
+  # segment to reference the same port (e.g. 0-0 to configure a SPAN port on
+  # port 0).
+  #
+  # Port segments are specified in the form:
+  # ports: [0-1,2-3,4-5,6-6,7-7]
+  #
+  # For legacy systems, when hardware-bypass is disabled, this can be specified in any
+  # of the following ways:
   #
   # a list of individual ports (e.g. ports: [0,1,2,3])
   #
@@ -1788,9 +1822,9 @@ napatech:
   # "all" to indicate that all ports are to be merged together
   # (e.g. ports: [all])
   #
-  # This has no effect if auto-config is disabled.
+  # This parameter has no effect if auto-config is disabled.
   #
-  ports: [all]
+  ports: [0-1,2-3]

   # When auto-config is enabled the hashmode specifies the algorithm for
   # determining to which stream a given packet is to be delivered.
   #
   # See Napatech NTPL documentation other hashmodes and details on their use.
   #
-  # This has no effect if auto-config is disabled.
+  # This parameter has no effect if auto-config is disabled.
   #
   hashmode: hash5tuplesorted
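
To tie the template options above together, the following is a minimal sketch of the
bypass-related part of a napatech section for a flow-aware card wired inline across two
port segments. The option names come from the template in this patch; the specific port
pairs, the choice to run inline, and the hashmode are illustrative assumptions only and
must be adapted to the actual adapter layout::

  napatech:
    # Offload the drop/forward decision for adjudicated flows to the card.
    hardware-bypass: yes

    # Forward traffic out the peer port of each segment after inspection;
    # leave at "no" for passive (tap or SPAN) monitoring.
    inline: yes

    # Port segments: each entry pairs the ports carrying the upstream and
    # downstream sides of the same link (use n-n for a SPAN port).
    ports: [0-1, 2-3]

    hashmode: hash5tuplesorted

The remaining options (streams, auto-config, enable-stream-stats) follow the template
above; with cpu-affinity enabled in the threading section the number of streams is taken
from the worker-cpu-set, otherwise the streams list must be defined explicitly.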